diff --git a/LICENSES/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE b/LICENSES/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE deleted file mode 100644 index 38e1cbfbe7b9f..0000000000000 --- a/LICENSES/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -= vendor/github.com/grpc-ecosystem/grpc-gateway licensed under: = - -Copyright (c) 2015, Gengo, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of Gengo, Inc. nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -= vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt c510a2a01572b82d27f28fd4d02ca318 diff --git a/LICENSES/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE b/LICENSES/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE index fbff3301de860..6fc04d5b6ccc9 100644 --- a/LICENSES/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE +++ b/LICENSES/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE @@ -28,4 +28,4 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -= vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt c510a2a01572b82d27f28fd4d02ca318 += vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE c510a2a01572b82d27f28fd4d02ca318 diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE b/LICENSES/vendor/github.com/klauspost/compress/LICENSE similarity index 66% rename from vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE rename to LICENSES/vendor/github.com/klauspost/compress/LICENSE index b2b065037fc4d..5202785e3077c 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE +++ b/LICENSES/vendor/github.com/klauspost/compress/LICENSE @@ -1,4 +1,39 @@ - Apache License += vendor/github.com/klauspost/compress licensed under: = + +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -186,7 +221,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2016-2017 The New York Times Company Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -198,4 +233,76 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + += vendor/github.com/klauspost/compress/LICENSE d0fd9ebda39468b51ff4539c9fbb13a8 diff --git a/LICENSES/vendor/google.golang.org/genproto/LICENSE b/LICENSES/vendor/github.com/kylelemons/godebug/LICENSE similarity index 98% rename from LICENSES/vendor/google.golang.org/genproto/LICENSE rename to LICENSES/vendor/github.com/kylelemons/godebug/LICENSE index 74b70837603d6..d682abdd3a284 100644 --- a/LICENSES/vendor/google.golang.org/genproto/LICENSE +++ b/LICENSES/vendor/github.com/kylelemons/godebug/LICENSE @@ -1,4 +1,4 @@ -= vendor/google.golang.org/genproto licensed under: = += vendor/github.com/kylelemons/godebug licensed under: = Apache License @@ -203,4 +203,4 @@ See the License for the specific language governing permissions and limitations under the License. 
-= vendor/google.golang.org/genproto/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/github.com/kylelemons/godebug/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 diff --git a/LICENSES/vendor/go.etcd.io/etcd/raft/v3/LICENSE b/LICENSES/vendor/go.etcd.io/raft/v3/LICENSE similarity index 98% rename from LICENSES/vendor/go.etcd.io/etcd/raft/v3/LICENSE rename to LICENSES/vendor/go.etcd.io/raft/v3/LICENSE index df46e75ab4746..bd5bf31c762fb 100644 --- a/LICENSES/vendor/go.etcd.io/etcd/raft/v3/LICENSE +++ b/LICENSES/vendor/go.etcd.io/raft/v3/LICENSE @@ -1,4 +1,4 @@ -= vendor/go.etcd.io/etcd/raft/v3 licensed under: = += vendor/go.etcd.io/raft/v3 licensed under: = Apache License @@ -203,4 +203,4 @@ See the License for the specific language governing permissions and limitations under the License. -= vendor/go.etcd.io/etcd/raft/v3/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/go.etcd.io/raft/v3/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 diff --git a/LICENSES/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/LICENSES/vendor/google.golang.org/genproto/googleapis/api/LICENSE index a54497395700a..af8155c426308 100644 --- a/LICENSES/vendor/google.golang.org/genproto/googleapis/api/LICENSE +++ b/LICENSES/vendor/google.golang.org/genproto/googleapis/api/LICENSE @@ -203,4 +203,4 @@ See the License for the specific language governing permissions and limitations under the License. -= vendor/google.golang.org/genproto/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/genproto/googleapis/api/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 diff --git a/LICENSES/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/LICENSES/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE index 25604c718df3e..2b433834ec08b 100644 --- a/LICENSES/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE +++ b/LICENSES/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE @@ -203,4 +203,4 @@ See the License for the specific language governing permissions and limitations under the License. -= vendor/google.golang.org/genproto/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 += vendor/google.golang.org/genproto/googleapis/rpc/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 diff --git a/cmd/kubeadm/app/util/etcd/etcd.go b/cmd/kubeadm/app/util/etcd/etcd.go index 5b6f62eaa33b8..1a16ab843a62e 100644 --- a/cmd/kubeadm/app/util/etcd/etcd.go +++ b/cmd/kubeadm/app/util/etcd/etcd.go @@ -71,7 +71,7 @@ type etcdClient interface { Endpoints() []string // MemberList lists the current cluster membership. - MemberList(ctx context.Context) (*clientv3.MemberListResponse, error) + MemberList(ctx context.Context, opts ...clientv3.OpOption) (*clientv3.MemberListResponse, error) // MemberAdd adds a new member into the cluster. MemberAdd(ctx context.Context, peerAddrs []string) (*clientv3.MemberAddResponse, error) diff --git a/cmd/kubeadm/app/util/etcd/etcd_test.go b/cmd/kubeadm/app/util/etcd/etcd_test.go index cbdba0f2955ad..c51cb516a0b8b 100644 --- a/cmd/kubeadm/app/util/etcd/etcd_test.go +++ b/cmd/kubeadm/app/util/etcd/etcd_test.go @@ -28,6 +28,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" clientv3 "go.etcd.io/etcd/client/v3" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" clientsetfake "k8s.io/client-go/kubernetes/fake" @@ -57,7 +58,7 @@ func (f *fakeEtcdClient) Endpoints() []string { } // MemberList lists the current cluster membership. 
-func (f *fakeEtcdClient) MemberList(_ context.Context) (*clientv3.MemberListResponse, error) { +func (f *fakeEtcdClient) MemberList(_ context.Context, _ ...clientv3.OpOption) (*clientv3.MemberListResponse, error) { return &clientv3.MemberListResponse{ Members: f.members, }, nil diff --git a/go.mod b/go.mod index e7cd06096a863..43e0c5ef864ce 100644 --- a/go.mod +++ b/go.mod @@ -52,37 +52,37 @@ require ( github.com/opencontainers/selinux v1.11.1 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.55.0 + github.com/prometheus/common v0.60.1 github.com/robfig/cron/v3 v3.0.1 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 github.com/vishvananda/netlink v1.3.1-0.20240905180732-b1ce50cfa9be github.com/vishvananda/netns v0.0.4 - go.etcd.io/etcd/api/v3 v3.5.16 - go.etcd.io/etcd/client/pkg/v3 v3.5.16 - go.etcd.io/etcd/client/v3 v3.5.16 + go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd + go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd + go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 - go.opentelemetry.io/otel v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 - go.opentelemetry.io/otel/metric v1.28.0 - go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 + go.opentelemetry.io/otel/metric v1.32.0 + go.opentelemetry.io/otel/sdk v1.32.0 + go.opentelemetry.io/otel/trace v1.32.0 go.opentelemetry.io/proto/otlp v1.3.1 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.28.0 golang.org/x/net v0.30.0 golang.org/x/oauth2 v0.23.0 - golang.org/x/sync v0.8.0 - golang.org/x/sys v0.26.0 + golang.org/x/sync v0.9.0 + golang.org/x/sys v0.27.0 golang.org/x/term v0.25.0 - golang.org/x/time v0.7.0 + golang.org/x/time v0.8.0 golang.org/x/tools v0.26.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 - google.golang.org/grpc v1.65.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 + google.golang.org/grpc v1.68.0 google.golang.org/protobuf v1.35.1 gopkg.in/evanphx/json-patch.v4 v4.12.0 gopkg.in/square/go-jose.v2 v2.6.0 @@ -154,21 +154,21 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/golang-jwt/jwt/v4 v4.5.1 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect 
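The etcd hunks above track an upstream signature change: the v3.6.0-alpha client's `MemberList` now takes variadic `clientv3.OpOption` arguments, so kubeadm's `etcdClient` interface and its test fake are widened to match. A minimal sketch of the adapted shape, assuming illustrative package and variable names (only the signature itself comes from this diff):

```go
package main

import (
	"context"
	"fmt"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	clientv3 "go.etcd.io/etcd/client/v3"
)

// etcdClient mirrors just the method touched by this diff.
type etcdClient interface {
	MemberList(ctx context.Context, opts ...clientv3.OpOption) (*clientv3.MemberListResponse, error)
}

// fakeEtcdClient ignores the new variadic options, as the updated test fake does.
type fakeEtcdClient struct {
	members []*pb.Member
}

func (f *fakeEtcdClient) MemberList(_ context.Context, _ ...clientv3.OpOption) (*clientv3.MemberListResponse, error) {
	return &clientv3.MemberListResponse{Members: f.members}, nil
}

func main() {
	var c etcdClient = &fakeEtcdClient{members: []*pb.Member{{ID: 1, Name: "m1"}}}
	resp, _ := c.MemberList(context.Background()) // existing no-option call sites compile unchanged
	fmt.Println(len(resp.Members))
}
```

Because the options are variadic, every existing `MemberList(ctx)` call keeps compiling; only interface definitions and fakes need the explicit `opts ...clientv3.OpOption` parameter.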
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jonboulle/clockwork v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/karrick/godirwalk v1.17.0 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect @@ -195,20 +195,19 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.etcd.io/bbolt v1.3.11 // indirect - go.etcd.io/etcd/client/v2 v2.305.16 // indirect - go.etcd.io/etcd/pkg/v3 v3.5.16 // indirect - go.etcd.io/etcd/raft/v3 v3.5.16 // indirect - go.etcd.io/etcd/server/v3 v3.5.16 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.etcd.io/bbolt v1.4.0-beta.0 // indirect + go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 // indirect + go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/raft/v3 v3.6.0-alpha.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/text v0.19.0 // indirect - google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect + golang.org/x/text v0.20.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 21e2dbed8cbbf..57db5a5358842 100644 --- a/go.sum +++ b/go.sum @@ -2,128 +2,10 @@ bitbucket.org/bertimus9/systemstat v0.5.0 h1:n0aLnh2Jo4nBUBym9cE5PJDG8GT6g+4VuS2 bitbucket.org/bertimus9/systemstat v0.5.0/go.mod h1:EkUWPp8lKFPMXP8vnbpT5JDI0W/sTiLZAvN8ONWErHY= cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc= -cloud.google.com/go/accesscontextmanager v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M= -cloud.google.com/go/aiplatform v1.58.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= -cloud.google.com/go/analytics v0.22.0/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w= -cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY= -cloud.google.com/go/apigeeconnect v1.6.4/go.mod 
h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0= -cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8= -cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg= -cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M= -cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE= -cloud.google.com/go/asset v1.17.0/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= -cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= -cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8= -cloud.google.com/go/baremetalsolution v1.2.3/go.mod h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g= -cloud.google.com/go/batch v1.7.0/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU= -cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo= -cloud.google.com/go/bigquery v1.58.0/go.mod h1:0eh4mWNY0KrBTjUzLjoYImapGORq9gEPT7MWjCy9lik= -cloud.google.com/go/billing v1.18.0/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk= -cloud.google.com/go/binaryauthorization v1.8.0/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU= -cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE= -cloud.google.com/go/channel v1.17.4/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE= -cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM= -cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= -cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/contactcenterinsights v1.12.1/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= -cloud.google.com/go/container v1.29.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= -cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= -cloud.google.com/go/datacatalog v1.19.2/go.mod h1:2YbODwmhpLM4lOFe3PuEhHK9EyTzQJ5AXgIy7EDKTEE= -cloud.google.com/go/dataflow v0.9.4/go.mod h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w= -cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs= -cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM= -cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8= -cloud.google.com/go/dataplex v1.14.0/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= -cloud.google.com/go/dataproc/v2 v2.3.0/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY= -cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c= -cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= -cloud.google.com/go/datastream v1.10.3/go.mod h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA= -cloud.google.com/go/deploy v1.17.0/go.mod h1:XBr42U5jIr64t92gcpOXxNrqL2PStQCXHuKK5GRUuYo= -cloud.google.com/go/dialogflow v1.48.1/go.mod h1:C1sjs2/g9cEwjCltkKeYp3FFpz8BOzNondEaAlCpt+A= -cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI= -cloud.google.com/go/documentai v1.23.7/go.mod 
h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= -cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY= -cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM= -cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg= -cloud.google.com/go/filestore v1.8.0/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI= -cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= -cloud.google.com/go/functions v1.15.4/go.mod h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I= -cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI= -cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw= -cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc= -cloud.google.com/go/gkemulticloud v1.1.0/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0= -cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw= -cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI= -cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk= -cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= -cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc= -cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA= -cloud.google.com/go/logging v1.9.0/go.mod h1:1Io0vnZv4onoUnsVUQY3HZ3Igb1nBchky0A0y7BBBhE= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM= -cloud.google.com/go/maps v1.6.3/go.mod h1:VGAn809ADswi1ASofL5lveOHPnE6Rk/SFTTBx1yuOLw= -cloud.google.com/go/mediatranslation v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4= -cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0= -cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE= -cloud.google.com/go/monitoring v1.17.0/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= -cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek= -cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU= -cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w= -cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70= -cloud.google.com/go/optimization v1.6.2/go.mod h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY= -cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI= -cloud.google.com/go/orgpolicy v1.12.0/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI= -cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA= -cloud.google.com/go/oslogin v1.13.0/go.mod h1:xPJqLwpTZ90LSE5IL1/svko+6c5avZLluiyylMb/sRA= 
-cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE= -cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0= -cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0= -cloud.google.com/go/pubsub v1.34.0/go.mod h1:alj4l4rBg+N3YTFDDC+/YyFTs6JAjam2QfYsddcAW4c= -cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= -cloud.google.com/go/recaptchaenterprise/v2 v2.9.0/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= -cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU= -cloud.google.com/go/recommender v1.12.0/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4= -cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs= -cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0= -cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI= -cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg= -cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4= -cloud.google.com/go/scheduler v1.10.5/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI= -cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= -cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4= -cloud.google.com/go/securitycenter v1.24.3/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM= -cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw= -cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM= -cloud.google.com/go/spanner v1.55.0/go.mod h1:HXEznMUVhC+PC+HDyo9YFG2Ajj5BQDkcbqB9Z2Ffxi0= -cloud.google.com/go/speech v1.21.0/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= -cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc= -cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI= -cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74= -cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y= -cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY= -cloud.google.com/go/translate v1.10.0/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0= -cloud.google.com/go/video v1.20.3/go.mod h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU= -cloud.google.com/go/videointelligence v1.11.4/go.mod h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8= -cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM= -cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70= -cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4= -cloud.google.com/go/vpcaccess v1.7.4/go.mod h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk= -cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0= -cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o= -cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= 
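The long runs of deleted `cloud.google.com/go/...` lines above are mechanical `go.sum` pruning that follows from the version bumps in `go.mod`: each surviving entry pairs a module path and version with an `h1:` digest (or a `/go.mod h1:` digest covering just that module's go.mod file). As a hedged sketch, such a digest can be recomputed from a module zip with `golang.org/x/mod`; the zip path here is illustrative (real zips live under `$GOMODCACHE/cache/download/`):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// dirhash.Hash1 is the algorithm behind every "h1:" line in go.sum.
	sum, err := dirhash.HashZip("compress@v1.17.9.zip", dirhash.Hash1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sum) // for the genuine zip: h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
}
```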
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -164,7 +46,6 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -173,9 +54,7 @@ github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHe github.com/checkpoint-restore/go-criu/v6 v6.3.0/go.mod h1:rrRTN/uSwY2X+BPRl/gkulo9gsKOSAeVp9/K2tv7xZI= github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/container-storage-interface/spec v1.9.0 h1:zKtX4STsq31Knz3gciCYCi1SXtO2HJDecIjDVboYavY= @@ -224,12 +103,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= 
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= @@ -243,12 +118,9 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -265,7 +137,6 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -273,27 +144,20 @@ github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cadvisor v0.51.0 h1:BspqSPdZoLKrnvuZNOvM/KiJ/A+RdixwagN20n+2H8k= github.com/google/cadvisor v0.51.0/go.mod h1:czGE/c/P/i0QFpVNKTFrIEzord9Y10YfpwuaSWXELc0= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -305,7 +169,6 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAx github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -313,14 +176,11 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -338,8 +198,8 @@ github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwS github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -347,6 +207,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/libopenstorage/openstorage v1.0.0 h1:GLPam7/0mpdP8ZZtKjbfcXJBTIA/T1O6CBErVEFEyIM= github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -404,37 +266,34 @@ github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -447,14 +306,12 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod 
h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -480,54 +337,50 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= -go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= -go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= -go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= -go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= -go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= -go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= -go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= -go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= +go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM= +go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:uuyUQSC6O58OfQH7KlKhmIbHjG/l+lTqyq6Y5vXOHcs= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:l8QjUfDmZHqyNMTMwrZvIk+xbvZxOkmZjnFkmO9ktl8= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod 
h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 h1:9VRJ698EFIMfjOQtcjKMM7CWXOIxp9R4I8JA1mk+WT4= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:Bpoj6mDJcLmMExbqoIFoqYYLzIu0bdVf/7XjP5UUs9E= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:21vB+bra7pRWMRlZvgdDkb9FXExRshiUeDhTBvYIuLs= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:FL+PMODrygIxX5lpNEyv1y1GKqFTXa0p/XH31SHpbcw= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:8EoX6i7o6cN9I1KbIpgUBVaY9YFQBRtV3wS68rpEf+s= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:SSZTJkOelKEqXz6mwE1J9aSnZB3G7YROnHOGij32Opk= +go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= +go.etcd.io/raft/v3 v3.6.0-alpha.0 h1:cMmjAEjCKMGiQPowjSWM43Y5ZnBEeNP8RSYcm3ewtns= +go.etcd.io/raft/v3 v3.6.0-alpha.0/go.mod h1:QpxpKeYmocQQFHP75LxNrdJTukZmqQig9lotwYLsUJY= go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 h1:Z6SbqeRZAl2OczfkFOqLx1BeYBDYehNjEnqluD7581Y= go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0/go.mod h1:XiglO+8SPMqM3Mqh5/rtxR1VHc63o8tb38QrU6tm4mU= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/contrib/propagators/b3 v1.17.0 h1:ImOVvHnku8jijXqkwCSyYKRDt2YrnGXD4BbhcpfbfJo= go.opentelemetry.io/contrib/propagators/b3 v1.17.0/go.mod h1:IkfUfMpKWmynvvE0264trz0sf32NRTZL4nuAN9AbWRc= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -535,48 +388,30 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -585,8 +420,8 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= @@ -594,15 +429,11 @@ golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -612,27 +443,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -646,15 +462,11 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1 
h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= diff --git a/go.work.sum b/go.work.sum index 78665c9725dca..6c017c7ae8284 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1,121 +1,5 @@ -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go/accessapproval v1.7.4 h1:ZvLvJ952zK8pFHINjpMBY5k7LTAp/6pBf50RDMRgBUI= -cloud.google.com/go/accesscontextmanager v1.8.4 h1:Yo4g2XrBETBCqyWIibN3NHNPQKUfQqti0lI+70rubeE= -cloud.google.com/go/aiplatform v1.58.0 h1:xyCAfpI4yUMOQ4VtHN/bdmxPQ8xoEkTwFM1nbVmuQhs= -cloud.google.com/go/analytics v0.22.0 h1:w8KIgW8NRUHFVKjpkwCpLaHsr685tJ+ckPStOaSCZz0= -cloud.google.com/go/apigateway v1.6.4 h1:VVIxCtVerchHienSlaGzV6XJGtEM9828Erzyr3miUGs= -cloud.google.com/go/apigeeconnect v1.6.4 h1:jSoGITWKgAj/ssVogNE9SdsTqcXnryPzsulENSRlusI= -cloud.google.com/go/apigeeregistry v0.8.2 h1:DSaD1iiqvELag+lV4VnnqUUFd8GXELu01tKVdWZrviE= -cloud.google.com/go/appengine v1.8.4 h1:Qub3fqR7iA1daJWdzjp/Q0Jz0fUG0JbMc7Ui4E9IX/E= -cloud.google.com/go/area120 v0.8.4 h1:YnSO8m02pOIo6AEOgiOoUDVbw4pf+bg2KLHi4rky320= -cloud.google.com/go/artifactregistry v1.14.6 h1:/hQaadYytMdA5zBh+RciIrXZQBWK4vN7EUsrQHG+/t8= -cloud.google.com/go/asset v1.17.0 h1:dLWfTnbwyrq/Kt8Tr2JiAbre1MEvS2Bl5cAMiYAy5Pg= -cloud.google.com/go/assuredworkloads v1.11.4 h1:FsLSkmYYeNuzDm8L4YPfLWV+lQaUrJmH5OuD37t1k20= -cloud.google.com/go/automl v1.13.4 h1:i9tOKXX+1gE7+rHpWKjiuPfGBVIYoWvLNIGpWgPtF58= -cloud.google.com/go/baremetalsolution v1.2.3 h1:oQiFYYCe0vwp7J8ZmF6siVKEumWtiPFJMJcGuyDVRUk= -cloud.google.com/go/batch v1.7.0 h1:AxuSPoL2fWn/rUyvWeNCNd0V2WCr+iHRCU9QO1PUmpY= -cloud.google.com/go/beyondcorp v1.0.3 h1:VXf9SnrnSmj2BF2cHkoTHvOUp8gjsz1KJFOMW7czdsY= -cloud.google.com/go/bigquery v1.58.0 h1:drSd9RcPVLJP2iFMimvOB9SCSIrcl+9HD4II03Oy7A0= -cloud.google.com/go/billing v1.18.0 h1:GvKy4xLy1zF1XPbwP5NJb2HjRxhnhxjjXxvyZ1S/IAo= -cloud.google.com/go/binaryauthorization v1.8.0 h1:PHS89lcFayWIEe0/s2jTBiEOtqghCxzc7y7bRNlifBs= -cloud.google.com/go/certificatemanager v1.7.4 h1:5YMQ3Q+dqGpwUZ9X5sipsOQ1fLPsxod9HNq0+nrqc6I= -cloud.google.com/go/channel v1.17.4 h1:yYHOORIM+wkBy3EdwArg/WL7Lg+SoGzlKH9o3Bw2/jE= -cloud.google.com/go/cloudbuild v1.15.0 h1:9IHfEMWdCklJ1cwouoiQrnxmP0q3pH7JUt8Hqx4Qbck= 
-cloud.google.com/go/clouddms v1.7.3 h1:xe/wJKz55VO1+L891a1EG9lVUgfHr9Ju/I3xh1nwF84= -cloud.google.com/go/cloudtasks v1.12.4 h1:5xXuFfAjg0Z5Wb81j2GAbB3e0bwroCeSF+5jBn/L650= cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/contactcenterinsights v1.12.1 h1:EiGBeejtDDtr3JXt9W7xlhXyZ+REB5k2tBgVPVtmNb0= -cloud.google.com/go/container v1.29.0 h1:jIltU529R2zBFvP8rhiG1mgeTcnT27KhU0H/1d6SQRg= -cloud.google.com/go/containeranalysis v0.11.3 h1:5rhYLX+3a01drpREqBZVXR9YmWH45RnML++8NsCtuD8= -cloud.google.com/go/datacatalog v1.19.2 h1:BV5sB7fPc8ccv/obwtHwQtCdLMAgI4KyaQWfkh8/mWg= -cloud.google.com/go/dataflow v0.9.4 h1:7VmCNWcPJBS/srN2QnStTB6nu4Eb5TMcpkmtaPVhRt4= -cloud.google.com/go/dataform v0.9.1 h1:jV+EsDamGX6cE127+QAcCR/lergVeeZdEQ6DdrxW3sQ= -cloud.google.com/go/datafusion v1.7.4 h1:Q90alBEYlMi66zL5gMSGQHfbZLB55mOAg03DhwTTfsk= -cloud.google.com/go/datalabeling v0.8.4 h1:zrq4uMmunf2KFDl/7dS6iCDBBAxBnKVDyw6+ajz3yu0= -cloud.google.com/go/dataplex v1.14.0 h1:/WhVTR4v/L6ACKjlz/9CqkxkrVh2z7C44CLMUf0f60A= -cloud.google.com/go/dataproc/v2 v2.3.0 h1:tTVP9tTxmc8fixxOd/8s6Q6Pz/+yzn7r7XdZHretQH0= -cloud.google.com/go/dataqna v0.8.4 h1:NJnu1kAPamZDs/if3bJ3+Wb6tjADHKL83NUWsaIp2zg= -cloud.google.com/go/datastore v1.15.0 h1:0P9WcsQeTWjuD1H14JIY7XQscIPQ4Laje8ti96IC5vg= -cloud.google.com/go/datastream v1.10.3 h1:Z2sKPIB7bT2kMW5Uhxy44ZgdJzxzE5uKjavoW+EuHEE= -cloud.google.com/go/deploy v1.17.0 h1:P3SgJ+4rAktC2XqaI10G0ip/vzWluNBrC5VG0abMbLw= -cloud.google.com/go/dialogflow v1.48.1 h1:1Uq2jDJzjJ3M4xYB608FCCFHfW3JmrTmHIxRSd7JGmY= -cloud.google.com/go/dlp v1.11.1 h1:OFlXedmPP/5//X1hBEeq3D9kUVm9fb6ywYANlpv/EsQ= -cloud.google.com/go/documentai v1.23.7 h1:hlYieOXUwiJ7HpBR/vEPfr8nfSxveLVzbqbUkSK0c/4= -cloud.google.com/go/domains v0.9.4 h1:ua4GvsDztZ5F3xqjeLKVRDeOvJshf5QFgWGg1CKti3A= -cloud.google.com/go/edgecontainer v1.1.4 h1:Szy3Q/N6bqgQGyxqjI+6xJZbmvPvnFHp3UZr95DKcQ0= -cloud.google.com/go/errorreporting v0.3.0 h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0= -cloud.google.com/go/essentialcontacts v1.6.5 h1:S2if6wkjR4JCEAfDtIiYtD+sTz/oXjh2NUG4cgT1y/Q= -cloud.google.com/go/eventarc v1.13.3 h1:+pFmO4eu4dOVipSaFBLkmqrRYG94Xl/TQZFOeohkuqU= -cloud.google.com/go/filestore v1.8.0 h1:/+wUEGwk3x3Kxomi2cP5dsR8+SIXxo7M0THDjreFSYo= -cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw= -cloud.google.com/go/functions v1.15.4 h1:ZjdiV3MyumRM6++1Ixu6N0VV9LAGlCX4AhW6Yjr1t+U= -cloud.google.com/go/gkebackup v1.3.4 h1:KhnOrr9A1tXYIYeXKqCKbCI8TL2ZNGiD3dm+d7BDUBg= -cloud.google.com/go/gkeconnect v0.8.4 h1:1JLpZl31YhQDQeJ98tK6QiwTpgHFYRJwpntggpQQWis= -cloud.google.com/go/gkehub v0.14.4 h1:J5tYUtb3r0cl2mM7+YHvV32eL+uZQ7lONyUZnPikCEo= -cloud.google.com/go/gkemulticloud v1.1.0 h1:C2Suwn3uPz+Yy0bxVjTlsMrUCaDovkgvfdyIa+EnUOU= -cloud.google.com/go/gsuiteaddons v1.6.4 h1:uuw2Xd37yHftViSI8J2hUcCS8S7SH3ZWH09sUDLW30Q= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iap v1.9.3 h1:M4vDbQ4TLXdaljXVZSwW7XtxpwXUUarY2lIs66m0aCM= -cloud.google.com/go/ids v1.4.4 h1:VuFqv2ctf/A7AyKlNxVvlHTzjrEvumWaZflUzBPz/M4= -cloud.google.com/go/iot v1.7.4 h1:m1WljtkZnvLTIRYW1YTOv5A6H1yKgLHR6nU7O8yf27w= -cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM= -cloud.google.com/go/language v1.12.2 h1:zg9uq2yS9PGIOdc0Kz/l+zMtOlxKWonZjjo5w5YPG2A= -cloud.google.com/go/lifesciences v0.9.4 
h1:rZEI/UxcxVKEzyoRS/kdJ1VoolNItRWjNN0Uk9tfexg= -cloud.google.com/go/logging v1.9.0 h1:iEIOXFO9EmSiTjDmfpbRjOxECO7R8C7b8IXUGOj7xZw= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/managedidentities v1.6.4 h1:SF/u1IJduMqQQdJA4MDyivlIQ4SrV5qAawkr/ZEREkY= -cloud.google.com/go/maps v1.6.3 h1:Qqs6Dza+PRp5CZO5AfgPnLwU1k3pp0IMFRDtLpT+aCA= -cloud.google.com/go/mediatranslation v0.8.4 h1:VRCQfZB4s6jN0CSy7+cO3m4ewNwgVnaePanVCQh/9Z4= -cloud.google.com/go/memcache v1.10.4 h1:cdex/ayDd294XBj2cGeMe6Y+H1JvhN8y78B9UW7pxuQ= -cloud.google.com/go/metastore v1.13.3 h1:94l/Yxg9oBZjin2bzI79oK05feYefieDq0o5fjLSkC8= -cloud.google.com/go/monitoring v1.17.0 h1:blrdvF0MkPPivSO041ihul7rFMhXdVp8Uq7F59DKXTU= -cloud.google.com/go/networkconnectivity v1.14.3 h1:e9lUkCe2BexsqsUc2bjV8+gFBpQa54J+/F3qKVtW+wA= -cloud.google.com/go/networkmanagement v1.9.3 h1:HsQk4FNKJUX04k3OI6gUsoveiHMGvDRqlaFM2xGyvqU= -cloud.google.com/go/networksecurity v0.9.4 h1:947tNIPnj1bMGTIEBo3fc4QrrFKS5hh0bFVsHmFm4Vo= -cloud.google.com/go/notebooks v1.11.2 h1:eTOTfNL1yM6L/PCtquJwjWg7ZZGR0URFaFgbs8kllbM= -cloud.google.com/go/optimization v1.6.2 h1:iFsoexcp13cGT3k/Hv8PA5aK+FP7FnbhwDO9llnruas= -cloud.google.com/go/orchestration v1.8.4 h1:kgwZ2f6qMMYIVBtUGGoU8yjYWwMTHDanLwM/CQCFaoQ= -cloud.google.com/go/orgpolicy v1.12.0 h1:sab7cDiyfdthpAL0JkSpyw1C3mNqkXToVOhalm79PJQ= -cloud.google.com/go/osconfig v1.12.4 h1:OrRCIYEAbrbXdhm13/JINn9pQchvTTIzgmOCA7uJw8I= -cloud.google.com/go/oslogin v1.13.0 h1:gbA/G4p+youIR4O/Rk6DU181QlBlpwPS16kvJwqEz8o= -cloud.google.com/go/phishingprotection v0.8.4 h1:sPLUQkHq6b4AL0czSJZ0jd6vL55GSTHz2B3Md+TCZI0= -cloud.google.com/go/policytroubleshooter v1.10.2 h1:sq+ScLP83d7GJy9+wpwYJVnY+q6xNTXwOdRIuYjvHT4= -cloud.google.com/go/privatecatalog v0.9.4 h1:Vo10IpWKbNvc/z/QZPVXgCiwfjpWoZ/wbgful4Uh/4E= -cloud.google.com/go/pubsub v1.34.0 h1:ZtPbfwfi5rLaPeSvDC29fFoE20/tQvGrUS6kVJZJvkU= -cloud.google.com/go/pubsublite v1.8.1 h1:pX+idpWMIH30/K7c0epN6V703xpIcMXWRjKJsz0tYGY= -cloud.google.com/go/recaptchaenterprise/v2 v2.9.0 h1:Zrd4LvT9PaW91X/Z13H0i5RKEv9suCLuk8zp+bfOpN4= -cloud.google.com/go/recommendationengine v0.8.4 h1:JRiwe4hvu3auuh2hujiTc2qNgPPfVp+Q8KOpsXlEzKQ= -cloud.google.com/go/recommender v1.12.0 h1:tC+ljmCCbuZ/ybt43odTFlay91n/HLIhflvaOeb0Dh4= -cloud.google.com/go/redis v1.14.1 h1:J9cEHxG9YLmA9o4jTSvWt/RuVEn6MTrPlYSCRHujxDQ= -cloud.google.com/go/resourcemanager v1.9.4 h1:JwZ7Ggle54XQ/FVYSBrMLOQIKoIT/uer8mmNvNLK51k= -cloud.google.com/go/resourcesettings v1.6.4 h1:yTIL2CsZswmMfFyx2Ic77oLVzfBFoWBYgpkgiSPnC4Y= -cloud.google.com/go/retail v1.14.4 h1:geqdX1FNqqL2p0ADXjPpw8lq986iv5GrVcieTYafuJQ= -cloud.google.com/go/run v1.3.3 h1:qdfZteAm+vgzN1iXzILo3nJFQbzziudkJrvd9wCf3FQ= -cloud.google.com/go/scheduler v1.10.5 h1:eMEettHlFhG5pXsoHouIM5nRT+k+zU4+GUvRtnxhuVI= -cloud.google.com/go/secretmanager v1.11.4 h1:krnX9qpG2kR2fJ+u+uNyNo+ACVhplIAS4Pu7u+4gd+k= -cloud.google.com/go/security v1.15.4 h1:sdnh4Islb1ljaNhpIXlIPgb3eYj70QWgPVDKOUYvzJc= -cloud.google.com/go/securitycenter v1.24.3 h1:crdn2Z2rFIy8WffmmhdlX3CwZJusqCiShtnrGFRwpeE= -cloud.google.com/go/servicedirectory v1.11.3 h1:5niCMfkw+jifmFtbBrtRedbXkJm3fubSR/KHbxSJZVM= -cloud.google.com/go/shell v1.7.4 h1:nurhlJcSVFZneoRZgkBEHumTYf/kFJptCK2eBUq/88M= -cloud.google.com/go/spanner v1.55.0 h1:YF/A/k73EMYCjp8wcJTpkE+TcrWutHRlsCtlRSfWS64= -cloud.google.com/go/speech v1.21.0 h1:qkxNao58oF8ghAHE1Eghen7XepawYEN5zuZXYWaUTA4= -cloud.google.com/go/storagetransfer v1.10.3 h1:YM1dnj5gLjfL6aDldO2s4GeU8JoAvH1xyIwXre63KmI= -cloud.google.com/go/talent 
v1.6.5 h1:LnRJhhYkODDBoTwf6BeYkiJHFw9k+1mAFNyArwZUZAs= -cloud.google.com/go/texttospeech v1.7.4 h1:ahrzTgr7uAbvebuhkBAAVU6kRwVD0HWsmDsvMhtad5Q= -cloud.google.com/go/tpu v1.6.4 h1:XIEH5c0WeYGaVy9H+UueiTaf3NI6XNdB4/v6TFQJxtE= -cloud.google.com/go/trace v1.10.4 h1:2qOAuAzNezwW3QN+t41BtkDJOG42HywL73q8x/f6fnM= -cloud.google.com/go/translate v1.10.0 h1:tncNaKmlZnayMMRX/mMM2d5AJftecznnxVBD4w070NI= -cloud.google.com/go/video v1.20.3 h1:Xrpbm2S9UFQ1pZEeJt9Vqm5t2T/z9y/M3rNXhFoo8Is= -cloud.google.com/go/videointelligence v1.11.4 h1:YS4j7lY0zxYyneTFXjBJUj2r4CFe/UoIi/PJG0Zt/Rg= -cloud.google.com/go/vision/v2 v2.7.5 h1:T/ujUghvEaTb+YnFY/jiYwVAkMbIC8EieK0CJo6B4vg= -cloud.google.com/go/vmmigration v1.7.4 h1:qPNdab4aGgtaRX+51jCOtJxlJp6P26qua4o1xxUDjpc= -cloud.google.com/go/vmwareengine v1.0.3 h1:WY526PqM6QNmFHSqe2sRfK6gRpzWjmL98UFkql2+JDM= -cloud.google.com/go/vpcaccess v1.7.4 h1:zbs3V+9ux45KYq8lxxn/wgXole6SlBHHKKyZhNJoS+8= -cloud.google.com/go/webrisk v1.9.4 h1:iceR3k0BCRZgf2D/NiKviVMFfuNC9LmeNLtxUFRB/wI= -cloud.google.com/go/websecurityscanner v1.6.4 h1:5Gp7h5j7jywxLUp6NTpjNPkgZb3ngl0tUSw6ICWvtJQ= -cloud.google.com/go/workflows v1.12.3 h1:qocsqETmLAl34mSa01hKZjcqAvt699gaoFbooGGMvaM= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= @@ -136,49 +20,38 @@ github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMr github.com/checkpoint-restore/go-criu/v6 v6.3.0 h1:mIdrSO2cPNWQY1truPg6uHLXyKHk3Z5Odx4wjKOASzA= github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 h1:KwWnWVWCNtNq/ewIX7HIKnELmEx2nDP42yskD/pi7QE= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= -github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= -go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= +go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= sigs.k8s.io/kustomize/cmd/config v0.15.0 h1:WkdY8V2+8J+W00YbImXa2ke9oegfrHH79e+kywW7EdU= diff --git a/hack/unwanted-dependencies.json b/hack/unwanted-dependencies.json index 2cc90dd509e2a..b51f46e2b5f77 100644 --- a/hack/unwanted-dependencies.json +++ b/hack/unwanted-dependencies.json @@ -80,18 
+80,8 @@ }, "status": { "unwantedReferences": { - "cloud.google.com/go": [ - "google.golang.org/genproto" - ], - "cloud.google.com/go/bigquery": [ - "google.golang.org/genproto" - ], "cloud.google.com/go/compute": [ - "github.com/google/cadvisor", - "google.golang.org/genproto" - ], - "cloud.google.com/go/firestore": [ - "google.golang.org/genproto" + "github.com/google/cadvisor" ], "github.com/davecgh/go-spew": [ "github.com/cyphar/filepath-securejoin", @@ -101,15 +91,18 @@ "github.com/go-task/slim-sprig/v3", "github.com/google/cadvisor", "github.com/json-iterator/go", - "github.com/prometheus/client_golang", "github.com/prometheus/common", "github.com/sirupsen/logrus", "github.com/stretchr/objx", "github.com/stretchr/testify", "go.etcd.io/bbolt", + "go.etcd.io/etcd/api/v3", "go.etcd.io/etcd/client/pkg/v3", + "go.etcd.io/etcd/client/v2", + "go.etcd.io/etcd/client/v3", "go.etcd.io/etcd/pkg/v3", "go.etcd.io/etcd/server/v3", + "go.etcd.io/raft/v3", "go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful", "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp", @@ -129,18 +122,14 @@ "sigs.k8s.io/kustomize/kustomize/v5", "sigs.k8s.io/kustomize/kyaml" ], - "github.com/go-kit/kit": [ - "github.com/grpc-ecosystem/go-grpc-middleware" - ], "github.com/gogo/protobuf": [ "github.com/containerd/containerd/api", "github.com/containerd/ttrpc", "github.com/google/cadvisor", - "github.com/grpc-ecosystem/go-grpc-middleware", "go.etcd.io/etcd/api/v3", "go.etcd.io/etcd/client/v3", - "go.etcd.io/etcd/raft/v3", "go.etcd.io/etcd/server/v3", + "go.etcd.io/raft/v3", "k8s.io/api", "k8s.io/apiextensions-apiserver", "k8s.io/apimachinery", @@ -184,14 +173,8 @@ "go.etcd.io/etcd/server/v3", "k8s.io/apiserver" ], - "github.com/grpc-ecosystem/grpc-gateway": [ - "go.etcd.io/etcd/api/v3", - "go.etcd.io/etcd/server/v3" - ], "github.com/json-iterator/go": [ "github.com/prometheus/client_golang", - "go.etcd.io/etcd/client/v2", - "go.etcd.io/etcd/server/v3", "go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful", "k8s.io/kube-openapi", "sigs.k8s.io/structured-merge-diff/v4" @@ -207,7 +190,6 @@ "github.com/pkg/errors": [ "github.com/Microsoft/hnslib", "github.com/google/cadvisor", - "github.com/grpc-ecosystem/go-grpc-middleware", "k8s.io/kubectl", "k8s.io/kubernetes", "sigs.k8s.io/kustomize/api", @@ -219,23 +201,9 @@ "github.com/opencontainers/runc", "k8s.io/mount-utils" ], - "google.golang.org/appengine": [ - "github.com/prometheus/client_golang" - ], - "google.golang.org/genproto": [ - "github.com/grpc-ecosystem/go-grpc-middleware", - "github.com/grpc-ecosystem/grpc-gateway", - "go.etcd.io/etcd/api/v3", - "go.etcd.io/etcd/client/v3", - "go.etcd.io/etcd/server/v3" - ], "gopkg.in/yaml.v2": [ - "github.com/grpc-ecosystem/grpc-gateway", "github.com/prometheus/client_golang", - "github.com/prometheus/common", - "go.etcd.io/etcd/api/v3", - "go.etcd.io/etcd/client/v3", - "go.etcd.io/etcd/server/v3" + "github.com/prometheus/common" ] }, "unwantedVendored": [ @@ -245,12 +213,10 @@ "github.com/google/shlex", "github.com/gregjones/httpcache", "github.com/grpc-ecosystem/go-grpc-prometheus", - "github.com/grpc-ecosystem/grpc-gateway", "github.com/json-iterator/go", "github.com/mailru/easyjson", "github.com/pkg/errors", - "golang.org/x/exp", - "google.golang.org/genproto" + "golang.org/x/exp" ] } } diff --git a/staging/src/k8s.io/api/go.mod b/staging/src/k8s.io/api/go.mod index 
53a5eab29c3d5..8c134a20330cc 100644 --- a/staging/src/k8s.io/api/go.mod +++ b/staging/src/k8s.io/api/go.mod @@ -22,11 +22,10 @@ require ( github.com/kr/pretty v0.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/text v0.20.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/staging/src/k8s.io/api/go.sum b/staging/src/k8s.io/api/go.sum index c868125667cef..e24d3c5d07bed 100644 --- a/staging/src/k8s.io/api/go.sum +++ b/staging/src/k8s.io/api/go.sum @@ -53,8 +53,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -81,17 +81,17 @@ golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 
h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod index cb77f5fff1ebb..8938ae8b92f14 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.mod +++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod @@ -18,11 +18,11 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 - go.etcd.io/etcd/client/pkg/v3 v3.5.16 - go.etcd.io/etcd/client/v3 v3.5.16 - go.opentelemetry.io/otel v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 - google.golang.org/grpc v1.65.0 + go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd + go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd + go.opentelemetry.io/otel v1.32.0 + go.opentelemetry.io/otel/trace v1.32.0 + google.golang.org/grpc v1.68.0 google.golang.org/protobuf v1.35.1 gopkg.in/evanphx/json-patch.v4 v4.12.0 k8s.io/api v0.0.0 @@ -59,18 +59,18 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.1 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/google/btree v1.1.3 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jonboulle/clockwork v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -79,9 +79,9 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/soheilhy/cmux v0.1.5 // indirect @@ -89,18 +89,18 @@ require ( github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xiang90/probing 
v0.0.0-20221125231312-a49e3df8f510 // indirect - go.etcd.io/bbolt v1.3.11 // indirect - go.etcd.io/etcd/api/v3 v3.5.16 // indirect - go.etcd.io/etcd/client/v2 v2.305.16 // indirect - go.etcd.io/etcd/pkg/v3 v3.5.16 // indirect - go.etcd.io/etcd/raft/v3 v3.5.16 // indirect - go.etcd.io/etcd/server/v3 v3.5.16 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.etcd.io/bbolt v1.4.0-beta.0 // indirect + go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 // indirect + go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/raft/v3 v3.6.0-alpha.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/sdk v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect @@ -109,15 +109,14 @@ require ( golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect golang.org/x/tools v0.26.0 // indirect - google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum index d142701e0b9dd..68217eb0d9234 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ -1,126 +1,7 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc= 
-cloud.google.com/go/accesscontextmanager v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M= -cloud.google.com/go/aiplatform v1.58.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= -cloud.google.com/go/analytics v0.22.0/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w= -cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY= -cloud.google.com/go/apigeeconnect v1.6.4/go.mod h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0= -cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8= -cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg= -cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M= -cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE= -cloud.google.com/go/asset v1.17.0/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= -cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= -cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8= -cloud.google.com/go/baremetalsolution v1.2.3/go.mod h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g= -cloud.google.com/go/batch v1.7.0/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU= -cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo= -cloud.google.com/go/bigquery v1.58.0/go.mod h1:0eh4mWNY0KrBTjUzLjoYImapGORq9gEPT7MWjCy9lik= -cloud.google.com/go/billing v1.18.0/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk= -cloud.google.com/go/binaryauthorization v1.8.0/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU= -cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE= -cloud.google.com/go/channel v1.17.4/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE= -cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM= -cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= -cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/contactcenterinsights v1.12.1/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= -cloud.google.com/go/container v1.29.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= -cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= -cloud.google.com/go/datacatalog v1.19.2/go.mod h1:2YbODwmhpLM4lOFe3PuEhHK9EyTzQJ5AXgIy7EDKTEE= -cloud.google.com/go/dataflow v0.9.4/go.mod h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w= -cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs= -cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM= -cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8= -cloud.google.com/go/dataplex v1.14.0/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= -cloud.google.com/go/dataproc/v2 v2.3.0/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY= -cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c= -cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= 
-cloud.google.com/go/datastream v1.10.3/go.mod h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA= -cloud.google.com/go/deploy v1.17.0/go.mod h1:XBr42U5jIr64t92gcpOXxNrqL2PStQCXHuKK5GRUuYo= -cloud.google.com/go/dialogflow v1.48.1/go.mod h1:C1sjs2/g9cEwjCltkKeYp3FFpz8BOzNondEaAlCpt+A= -cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI= -cloud.google.com/go/documentai v1.23.7/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= -cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY= -cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM= -cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg= -cloud.google.com/go/filestore v1.8.0/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI= -cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= -cloud.google.com/go/functions v1.15.4/go.mod h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I= -cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI= -cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw= -cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc= -cloud.google.com/go/gkemulticloud v1.1.0/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0= -cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw= -cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI= -cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk= -cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= -cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc= -cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA= -cloud.google.com/go/logging v1.9.0/go.mod h1:1Io0vnZv4onoUnsVUQY3HZ3Igb1nBchky0A0y7BBBhE= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM= -cloud.google.com/go/maps v1.6.3/go.mod h1:VGAn809ADswi1ASofL5lveOHPnE6Rk/SFTTBx1yuOLw= -cloud.google.com/go/mediatranslation v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4= -cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0= -cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE= -cloud.google.com/go/monitoring v1.17.0/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= -cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek= -cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU= -cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w= -cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70= -cloud.google.com/go/optimization v1.6.2/go.mod 
h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY= -cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI= -cloud.google.com/go/orgpolicy v1.12.0/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI= -cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA= -cloud.google.com/go/oslogin v1.13.0/go.mod h1:xPJqLwpTZ90LSE5IL1/svko+6c5avZLluiyylMb/sRA= -cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE= -cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0= -cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0= -cloud.google.com/go/pubsub v1.34.0/go.mod h1:alj4l4rBg+N3YTFDDC+/YyFTs6JAjam2QfYsddcAW4c= -cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= -cloud.google.com/go/recaptchaenterprise/v2 v2.9.0/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= -cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU= -cloud.google.com/go/recommender v1.12.0/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4= -cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs= -cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0= -cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI= -cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg= -cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4= -cloud.google.com/go/scheduler v1.10.5/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI= -cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= -cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4= -cloud.google.com/go/securitycenter v1.24.3/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM= -cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw= -cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM= -cloud.google.com/go/spanner v1.55.0/go.mod h1:HXEznMUVhC+PC+HDyo9YFG2Ajj5BQDkcbqB9Z2Ffxi0= -cloud.google.com/go/speech v1.21.0/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= -cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc= -cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI= -cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74= -cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y= -cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY= -cloud.google.com/go/translate v1.10.0/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0= -cloud.google.com/go/video v1.20.3/go.mod h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU= -cloud.google.com/go/videointelligence v1.11.4/go.mod h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8= -cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM= -cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70= -cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4= -cloud.google.com/go/vpcaccess v1.7.4/go.mod 
h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk= -cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0= -cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o= -cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= @@ -138,13 +19,10 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -154,7 +32,7 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -163,22 +41,15 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= 
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -195,31 +66,23 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog 
v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -228,21 +91,17 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 
h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -255,8 +114,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -264,6 +123,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -284,29 +145,26 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -319,13 +177,11 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -342,49 +198,45 @@ github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= -go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= -go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= -go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= -go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= -go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= -go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= -go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= -go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM= +go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:uuyUQSC6O58OfQH7KlKhmIbHjG/l+lTqyq6Y5vXOHcs= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:l8QjUfDmZHqyNMTMwrZvIk+xbvZxOkmZjnFkmO9ktl8= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 h1:9VRJ698EFIMfjOQtcjKMM7CWXOIxp9R4I8JA1mk+WT4= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:Bpoj6mDJcLmMExbqoIFoqYYLzIu0bdVf/7XjP5UUs9E= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:21vB+bra7pRWMRlZvgdDkb9FXExRshiUeDhTBvYIuLs= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod 
h1:FL+PMODrygIxX5lpNEyv1y1GKqFTXa0p/XH31SHpbcw= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:8EoX6i7o6cN9I1KbIpgUBVaY9YFQBRtV3wS68rpEf+s= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:SSZTJkOelKEqXz6mwE1J9aSnZB3G7YROnHOGij32Opk= +go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= +go.etcd.io/raft/v3 v3.6.0-alpha.0 h1:cMmjAEjCKMGiQPowjSWM43Y5ZnBEeNP8RSYcm3ewtns= +go.etcd.io/raft/v3 v3.6.0-alpha.0/go.mod h1:QpxpKeYmocQQFHP75LxNrdJTukZmqQig9lotwYLsUJY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= 
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -392,55 +244,37 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net 
v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= @@ -448,15 +282,11 @@ golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -466,27 +296,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod 
h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -499,14 +314,10 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod index 3ed7e46e73972..5e0a4ee6a7c75 100644 --- a/staging/src/k8s.io/apimachinery/go.mod +++ b/staging/src/k8s.io/apimachinery/go.mod @@ -22,7 +22,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 golang.org/x/net v0.30.0 - golang.org/x/time v0.7.0 + golang.org/x/time v0.8.0 gopkg.in/evanphx/json-patch.v4 v4.12.0 gopkg.in/inf.v0 v0.9.1 k8s.io/klog/v2 v2.130.1 @@ 
-48,9 +48,10 @@ require ( github.com/onsi/gomega v1.35.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/x448/float16 v0.8.4 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect golang.org/x/tools v0.26.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/apimachinery/go.sum b/staging/src/k8s.io/apimachinery/go.sum index 5bc9679b3c2ba..65811b037a2d6 100644 --- a/staging/src/k8s.io/apimachinery/go.sum +++ b/staging/src/k8s.io/apimachinery/go.sum @@ -74,8 +74,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -109,20 +109,20 @@ golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod 
h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod index 82bdab56628f8..b918a4f418cea 100644 --- a/staging/src/k8s.io/apiserver/go.mod +++ b/staging/src/k8s.io/apiserver/go.mod @@ -15,7 +15,7 @@ require ( github.com/fsnotify/fsnotify v1.7.0 github.com/go-logr/logr v1.4.2 github.com/gogo/protobuf v1.3.2 - github.com/google/btree v1.0.1 + github.com/google/btree v1.1.3 github.com/google/cel-go v0.22.0 github.com/google/gnostic-models v0.6.8 github.com/google/go-cmp v0.6.0 @@ -27,25 +27,25 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 - go.etcd.io/etcd/api/v3 v3.5.16 - go.etcd.io/etcd/client/pkg/v3 v3.5.16 - go.etcd.io/etcd/client/v3 v3.5.16 - go.etcd.io/etcd/server/v3 v3.5.16 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 + go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd + go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd + go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd + go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 - go.opentelemetry.io/otel v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 - go.opentelemetry.io/otel/metric v1.28.0 - go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 + go.opentelemetry.io/otel/metric v1.32.0 + go.opentelemetry.io/otel/sdk v1.32.0 + go.opentelemetry.io/otel/trace v1.32.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.28.0 golang.org/x/net v0.30.0 - golang.org/x/sync v0.8.0 - golang.org/x/sys v0.26.0 - golang.org/x/time v0.7.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 - google.golang.org/grpc v1.65.0 + golang.org/x/sync v0.9.0 + golang.org/x/sys v0.27.0 + golang.org/x/time v0.8.0 + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 + google.golang.org/grpc v1.68.0 google.golang.org/protobuf v1.35.1 gopkg.in/evanphx/json-patch.v4 v4.12.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 @@ -80,15 +80,15 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.1 // indirect github.com/golang/protobuf v1.5.4 // 
indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jonboulle/clockwork v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -96,9 +96,9 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/soheilhy/cmux v0.1.5 // indirect @@ -107,19 +107,18 @@ require ( github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 // indirect - go.etcd.io/bbolt v1.3.11 // indirect - go.etcd.io/etcd/client/v2 v2.305.16 // indirect - go.etcd.io/etcd/pkg/v3 v3.5.16 // indirect - go.etcd.io/etcd/raft/v3 v3.5.16 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.etcd.io/bbolt v1.4.0-beta.0 // indirect + go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 // indirect + go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/raft/v3 v3.6.0-alpha.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + golang.org/x/text v0.20.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum index 11a3e430419b1..9ac4a7ac401af 100644 --- a/staging/src/k8s.io/apiserver/go.sum +++ b/staging/src/k8s.io/apiserver/go.sum @@ -1,126 +1,7 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc= -cloud.google.com/go/accesscontextmanager 
v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M= -cloud.google.com/go/aiplatform v1.58.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= -cloud.google.com/go/analytics v0.22.0/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w= -cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY= -cloud.google.com/go/apigeeconnect v1.6.4/go.mod h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0= -cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8= -cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg= -cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M= -cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE= -cloud.google.com/go/asset v1.17.0/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= -cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= -cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8= -cloud.google.com/go/baremetalsolution v1.2.3/go.mod h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g= -cloud.google.com/go/batch v1.7.0/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU= -cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo= -cloud.google.com/go/bigquery v1.58.0/go.mod h1:0eh4mWNY0KrBTjUzLjoYImapGORq9gEPT7MWjCy9lik= -cloud.google.com/go/billing v1.18.0/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk= -cloud.google.com/go/binaryauthorization v1.8.0/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU= -cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE= -cloud.google.com/go/channel v1.17.4/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE= -cloud.google.com/go/cloudbuild v1.15.0/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM= -cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= -cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/contactcenterinsights v1.12.1/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= -cloud.google.com/go/container v1.29.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= -cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= -cloud.google.com/go/datacatalog v1.19.2/go.mod h1:2YbODwmhpLM4lOFe3PuEhHK9EyTzQJ5AXgIy7EDKTEE= -cloud.google.com/go/dataflow v0.9.4/go.mod h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w= -cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs= -cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM= -cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8= -cloud.google.com/go/dataplex v1.14.0/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= -cloud.google.com/go/dataproc/v2 v2.3.0/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY= -cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c= -cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= -cloud.google.com/go/datastream v1.10.3/go.mod 
h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA= -cloud.google.com/go/deploy v1.17.0/go.mod h1:XBr42U5jIr64t92gcpOXxNrqL2PStQCXHuKK5GRUuYo= -cloud.google.com/go/dialogflow v1.48.1/go.mod h1:C1sjs2/g9cEwjCltkKeYp3FFpz8BOzNondEaAlCpt+A= -cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI= -cloud.google.com/go/documentai v1.23.7/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= -cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY= -cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM= -cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg= -cloud.google.com/go/filestore v1.8.0/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI= -cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= -cloud.google.com/go/functions v1.15.4/go.mod h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I= -cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI= -cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw= -cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc= -cloud.google.com/go/gkemulticloud v1.1.0/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0= -cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw= -cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI= -cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk= -cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= -cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc= -cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA= -cloud.google.com/go/logging v1.9.0/go.mod h1:1Io0vnZv4onoUnsVUQY3HZ3Igb1nBchky0A0y7BBBhE= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM= -cloud.google.com/go/maps v1.6.3/go.mod h1:VGAn809ADswi1ASofL5lveOHPnE6Rk/SFTTBx1yuOLw= -cloud.google.com/go/mediatranslation v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4= -cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0= -cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE= -cloud.google.com/go/monitoring v1.17.0/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= -cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek= -cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU= -cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w= -cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70= -cloud.google.com/go/optimization v1.6.2/go.mod h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY= 
-cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI= -cloud.google.com/go/orgpolicy v1.12.0/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI= -cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA= -cloud.google.com/go/oslogin v1.13.0/go.mod h1:xPJqLwpTZ90LSE5IL1/svko+6c5avZLluiyylMb/sRA= -cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE= -cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0= -cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0= -cloud.google.com/go/pubsub v1.34.0/go.mod h1:alj4l4rBg+N3YTFDDC+/YyFTs6JAjam2QfYsddcAW4c= -cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= -cloud.google.com/go/recaptchaenterprise/v2 v2.9.0/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= -cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU= -cloud.google.com/go/recommender v1.12.0/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4= -cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs= -cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0= -cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI= -cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg= -cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4= -cloud.google.com/go/scheduler v1.10.5/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI= -cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= -cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4= -cloud.google.com/go/securitycenter v1.24.3/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM= -cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw= -cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM= -cloud.google.com/go/spanner v1.55.0/go.mod h1:HXEznMUVhC+PC+HDyo9YFG2Ajj5BQDkcbqB9Z2Ffxi0= -cloud.google.com/go/speech v1.21.0/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= -cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc= -cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI= -cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74= -cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y= -cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY= -cloud.google.com/go/translate v1.10.0/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0= -cloud.google.com/go/video v1.20.3/go.mod h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU= -cloud.google.com/go/videointelligence v1.11.4/go.mod h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8= -cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM= -cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70= -cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4= -cloud.google.com/go/vpcaccess v1.7.4/go.mod h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk= -cloud.google.com/go/webrisk 
v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0=
-cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o=
-cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
@@ -138,13 +19,10 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
 github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
 github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk=
@@ -155,7 +33,7 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -164,22 +42,15 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
@@ -196,31 +67,23 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
 github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
 github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -229,21 +92,17 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
 github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
@@ -256,8 +115,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -265,6 +124,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -285,30 +146,27 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM
 github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
 github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -321,13 +179,11 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
 github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
 github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -344,49 +200,45 @@ github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chq
 github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
-go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
-go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
-go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
-go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
-go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
-go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
-go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
-go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
-go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
-go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
-go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
-go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
-go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
-go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
-go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
-go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
+go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM=
+go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec=
+go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:uuyUQSC6O58OfQH7KlKhmIbHjG/l+lTqyq6Y5vXOHcs=
+go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I=
+go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:l8QjUfDmZHqyNMTMwrZvIk+xbvZxOkmZjnFkmO9ktl8=
+go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs=
+go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 h1:9VRJ698EFIMfjOQtcjKMM7CWXOIxp9R4I8JA1mk+WT4=
+go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs=
+go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:Bpoj6mDJcLmMExbqoIFoqYYLzIu0bdVf/7XjP5UUs9E=
+go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU=
+go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:21vB+bra7pRWMRlZvgdDkb9FXExRshiUeDhTBvYIuLs=
+go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:FL+PMODrygIxX5lpNEyv1y1GKqFTXa0p/XH31SHpbcw=
+go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:8EoX6i7o6cN9I1KbIpgUBVaY9YFQBRtV3wS68rpEf+s=
+go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:SSZTJkOelKEqXz6mwE1J9aSnZB3G7YROnHOGij32Opk=
+go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o=
+go.etcd.io/raft/v3 v3.6.0-alpha.0 h1:cMmjAEjCKMGiQPowjSWM43Y5ZnBEeNP8RSYcm3ewtns=
+go.etcd.io/raft/v3 v3.6.0-alpha.0/go.mod h1:QpxpKeYmocQQFHP75LxNrdJTukZmqQig9lotwYLsUJY=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
+go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
+go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
+go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
+go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
+go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
+go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
+go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
 go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -394,69 +246,47 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
 golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
 golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
 golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
 golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@@ -466,27 +296,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
-google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
-google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g=
+google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
+google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
 google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
 google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -500,14 +315,10 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST
 gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
 gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
 gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
diff --git a/staging/src/k8s.io/cli-runtime/go.mod b/staging/src/k8s.io/cli-runtime/go.mod
index 3374940b092a0..ba3fd1c1d88c0 100644
--- a/staging/src/k8s.io/cli-runtime/go.mod
+++ b/staging/src/k8s.io/cli-runtime/go.mod
@@ -15,8 +15,8 @@ require (
 	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.9.0
-	golang.org/x/sync v0.8.0
-	golang.org/x/text v0.19.0
+	golang.org/x/sync v0.9.0
+	golang.org/x/text v0.20.0
 	gopkg.in/evanphx/json-patch.v4 v4.12.0
 	k8s.io/api v0.0.0
 	k8s.io/apimachinery v0.0.0
@@ -42,7 +42,7 @@ require (
 	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/btree v1.0.1 // indirect
+	github.com/google/btree v1.1.3 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
@@ -61,9 +61,9 @@ require (
 	github.com/xlab/treeprint v1.2.0 // indirect
 	golang.org/x/net v0.30.0 // indirect
 	golang.org/x/oauth2 v0.23.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/sys v0.27.0 // indirect
 	golang.org/x/term v0.25.0 // indirect
-	golang.org/x/time v0.7.0 // indirect
+	golang.org/x/time v0.8.0 // indirect
 	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum
index fdd3895bceb06..07c2c7fa9ecd6 100644
--- a/staging/src/k8s.io/cli-runtime/go.sum
+++ b/staging/src/k8s.io/cli-runtime/go.sum
@@ -36,8 +36,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -98,8 +98,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -145,22 +145,22 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
 golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
diff --git a/staging/src/k8s.io/client-go/go.mod b/staging/src/k8s.io/client-go/go.mod
index 9172bf5cd1a69..34f9793f04d24 100644
--- a/staging/src/k8s.io/client-go/go.mod
+++ b/staging/src/k8s.io/client-go/go.mod
@@ -23,7 +23,7 @@ require (
 	golang.org/x/net v0.30.0
 	golang.org/x/oauth2 v0.23.0
 	golang.org/x/term v0.25.0
-	golang.org/x/time v0.7.0
+	golang.org/x/time v0.8.0
 	google.golang.org/protobuf v1.35.1
 	gopkg.in/evanphx/json-patch.v4 v4.12.0
 	k8s.io/api v0.0.0
@@ -45,7 +45,7 @@ require (
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
-	github.com/google/btree v1.0.1 // indirect
+	github.com/google/btree v1.1.3 // indirect
 	github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -58,8 +58,8 @@ require (
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	golang.org/x/sys v0.26.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
+	golang.org/x/sys v0.27.0 // indirect
+	golang.org/x/text v0.20.0 // indirect
 	golang.org/x/tools v0.26.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/staging/src/k8s.io/client-go/go.sum b/staging/src/k8s.io/client-go/go.sum
index 378e80b311a10..c2924e2b25b93 100644
--- a/staging/src/k8s.io/client-go/go.sum
+++ b/staging/src/k8s.io/client-go/go.sum
@@ -29,8 +29,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -85,8 +85,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -124,21 +124,21 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
 golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
 golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
diff --git a/staging/src/k8s.io/cloud-provider/go.mod b/staging/src/k8s.io/cloud-provider/go.mod
index 2e60cf1d0c815..ee591b61e0537 100644
--- a/staging/src/k8s.io/cloud-provider/go.mod
+++ b/staging/src/k8s.io/cloud-provider/go.mod
@@ -47,16 +47,18 @@ require (
 	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/btree v1.0.1 // indirect
+	github.com/google/btree v1.1.3 // indirect
 	github.com/google/cel-go v0.22.0 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -64,23 +66,23 @@ require (
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_golang v1.19.1 // indirect
+	github.com/prometheus/client_golang v1.20.5 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.55.0 // indirect
+	github.com/prometheus/common v0.60.1 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	go.etcd.io/etcd/api/v3 v3.5.16 // indirect
-	go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
-	go.etcd.io/etcd/client/v3 v3.5.16 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
+	go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect
+	go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
-	go.opentelemetry.io/otel v1.28.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
-	go.opentelemetry.io/otel/metric v1.28.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
-	go.opentelemetry.io/otel/trace v1.28.0 // indirect
+	go.opentelemetry.io/otel v1.32.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
+	go.opentelemetry.io/otel/metric v1.32.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.32.0 // indirect
+	go.opentelemetry.io/otel/trace v1.32.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
@@ -88,14 +90,14 @@ require (
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
 	golang.org/x/net v0.30.0 // indirect
 	golang.org/x/oauth2 v0.23.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/sync v0.9.0 // indirect
+	golang.org/x/sys v0.27.0 // indirect
 	golang.org/x/term v0.25.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
-	golang.org/x/time v0.7.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
-	google.golang.org/grpc v1.65.0 // indirect
+	golang.org/x/text v0.20.0 // indirect
+	golang.org/x/time v0.8.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
+	google.golang.org/grpc v1.68.0 // indirect
 	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/staging/src/k8s.io/cloud-provider/go.sum b/staging/src/k8s.io/cloud-provider/go.sum
index 018b07378daeb..4cf674230d90c 100644
--- a/staging/src/k8s.io/cloud-provider/go.sum
+++ b/staging/src/k8s.io/cloud-provider/go.sum
@@ -1,6 +1,6 @@
 cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
 cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
@@ -22,7 +22,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
 github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
@@ -40,8 +40,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -70,13 +70,13 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
 github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -94,14 +94,10 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
@@ -114,6 +110,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -121,6 +119,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -143,21 +143,22 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -188,41 +189,40 @@ github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chq
 github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
-go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
-go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
-go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
-go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
-go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
-go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
-go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
-go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
-go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
-go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
-go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
-go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
-go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
-go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
-go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
+go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM=
+go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec=
+go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:uuyUQSC6O58OfQH7KlKhmIbHjG/l+lTqyq6Y5vXOHcs=
+go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I=
+go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:l8QjUfDmZHqyNMTMwrZvIk+xbvZxOkmZjnFkmO9ktl8=
+go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs=
+go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 h1:9VRJ698EFIMfjOQtcjKMM7CWXOIxp9R4I8JA1mk+WT4=
+go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs=
+go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:Bpoj6mDJcLmMExbqoIFoqYYLzIu0bdVf/7XjP5UUs9E=
+go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU=
+go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:21vB+bra7pRWMRlZvgdDkb9FXExRshiUeDhTBvYIuLs=
+go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:FL+PMODrygIxX5lpNEyv1y1GKqFTXa0p/XH31SHpbcw=
+go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:8EoX6i7o6cN9I1KbIpgUBVaY9YFQBRtV3wS68rpEf+s=
+go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:SSZTJkOelKEqXz6mwE1J9aSnZB3G7YROnHOGij32Opk=
+go.etcd.io/raft/v3 v3.6.0-alpha.0 h1:cMmjAEjCKMGiQPowjSWM43Y5ZnBEeNP8RSYcm3ewtns=
+go.etcd.io/raft/v3 v3.6.0-alpha.0/go.mod h1:QpxpKeYmocQQFHP75LxNrdJTukZmqQig9lotwYLsUJY=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
+go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
+go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
+go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
+go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
+go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
+go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
+go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
@@ -250,22 +250,22 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys
v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -276,15 +276,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/cluster-bootstrap/go.mod b/staging/src/k8s.io/cluster-bootstrap/go.mod index 52b37185a76ea..7e7df388440b9 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.mod +++ b/staging/src/k8s.io/cluster-bootstrap/go.mod @@ -28,7 +28,7 @@ require ( github.com/x448/float16 v0.8.4 // indirect golang.org/x/crypto v0.28.0 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/text v0.20.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect diff --git a/staging/src/k8s.io/cluster-bootstrap/go.sum b/staging/src/k8s.io/cluster-bootstrap/go.sum index 3cb8bab46e34a..b456f4fce4a94 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.sum +++ b/staging/src/k8s.io/cluster-bootstrap/go.sum @@ -47,8 +47,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -77,17 +77,17 @@ golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0/go.mod 
h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/staging/src/k8s.io/code-generator/examples/go.mod b/staging/src/k8s.io/code-generator/examples/go.mod index bd5004269a8dd..0f07a8429106d 100644 --- a/staging/src/k8s.io/code-generator/examples/go.mod +++ b/staging/src/k8s.io/code-generator/examples/go.mod @@ -39,10 +39,10 @@ require ( github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/staging/src/k8s.io/code-generator/examples/go.sum b/staging/src/k8s.io/code-generator/examples/go.sum index 1b79bdfce540b..849875e60c3cd 100644 --- a/staging/src/k8s.io/code-generator/examples/go.sum +++ b/staging/src/k8s.io/code-generator/examples/go.sum @@ -66,8 +66,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -102,16 +102,16 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/staging/src/k8s.io/code-generator/go.mod b/staging/src/k8s.io/code-generator/go.mod index 8b742f8763e78..6d8b4378539cb 100644 --- a/staging/src/k8s.io/code-generator/go.mod +++ b/staging/src/k8s.io/code-generator/go.mod @@ -12,7 +12,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/gofuzz v1.2.0 github.com/spf13/pflag v1.0.5 - golang.org/x/text v0.19.0 + golang.org/x/text v0.20.0 k8s.io/apimachinery v0.0.0 k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 k8s.io/klog/v2 v2.130.1 @@ -36,7 +36,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/sync v0.8.0 // indirect + golang.org/x/sync v0.9.0 // indirect golang.org/x/tools v0.26.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/code-generator/go.sum b/staging/src/k8s.io/code-generator/go.sum index 268ddf453291f..6e417eecd64bc 100644 --- a/staging/src/k8s.io/code-generator/go.sum +++ b/staging/src/k8s.io/code-generator/go.sum @@ -68,8 +68,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -102,19 +102,19 @@ golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod index e7593ab634e2f..a0a210a329222 100644 --- a/staging/src/k8s.io/component-base/go.mod +++ b/staging/src/k8s.io/component-base/go.mod @@ -13,20 +13,20 @@ require ( github.com/google/go-cmp v0.6.0 github.com/moby/term v0.5.0 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.55.0 + github.com/prometheus/common v0.60.1 github.com/prometheus/procfs v0.15.1 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 - go.opentelemetry.io/otel v1.28.0 - 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 - go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 + go.opentelemetry.io/otel/sdk v1.32.0 + go.opentelemetry.io/otel/trace v1.32.0 go.uber.org/zap v1.27.0 - golang.org/x/sys v0.26.0 + golang.org/x/sys v0.27.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 k8s.io/klog/v2 v2.130.1 @@ -53,28 +53,30 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/x448/float16 v0.8.4 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/grpc v1.68.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum index da154e5cc24e2..5dbcb4e171bf1 100644 --- a/staging/src/k8s.io/component-base/go.sum +++ b/staging/src/k8s.io/component-base/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= @@ -17,7 +17,7 @@ 
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= @@ -28,8 +28,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -55,10 +55,10 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -73,8 +73,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 
h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -85,6 +85,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -92,6 +94,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= @@ -113,20 +117,21 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod 
h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= @@ -149,18 +154,18 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod 
h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -187,21 +192,21 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -212,13 +217,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T 
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/component-helpers/go.mod b/staging/src/k8s.io/component-helpers/go.mod index 1bc4da80b8223..95eb1034d4624 100644 --- a/staging/src/k8s.io/component-helpers/go.mod +++ b/staging/src/k8s.io/component-helpers/go.mod @@ -40,10 +40,10 @@ require ( github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/staging/src/k8s.io/component-helpers/go.sum b/staging/src/k8s.io/component-helpers/go.sum index eb695ac7be4dc..828e6ba0167a4 100644 --- a/staging/src/k8s.io/component-helpers/go.sum +++ b/staging/src/k8s.io/component-helpers/go.sum @@ -27,7 +27,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1/go.mod 
h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -76,8 +76,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -113,20 +113,20 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time 
v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/staging/src/k8s.io/controller-manager/go.mod b/staging/src/k8s.io/controller-manager/go.mod index e296b56561d60..602da54cb6245 100644 --- a/staging/src/k8s.io/controller-manager/go.mod +++ b/staging/src/k8s.io/controller-manager/go.mod @@ -48,48 +48,50 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect - go.etcd.io/etcd/api/v3 v3.5.16 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect - go.etcd.io/etcd/client/v3 v3.5.16 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // 
indirect golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/grpc v1.68.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/staging/src/k8s.io/controller-manager/go.sum b/staging/src/k8s.io/controller-manager/go.sum index 723f969914195..b1b54ba9a2a19 100644 --- a/staging/src/k8s.io/controller-manager/go.sum +++ b/staging/src/k8s.io/controller-manager/go.sum @@ -1,6 +1,6 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= @@ -21,7 +21,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -37,8 +37,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod 
h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -67,13 +67,13 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -91,14 +91,10 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/inconshreveable/mousetrap v1.1.0 
h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -111,6 +107,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -118,6 +116,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -139,21 +139,22 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 
h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -184,41 +185,40 @@ github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chq github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= -go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= -go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= -go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= -go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= -go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= -go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= -go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM= +go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:uuyUQSC6O58OfQH7KlKhmIbHjG/l+lTqyq6Y5vXOHcs= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I= +go.etcd.io/etcd/client/pkg/v3 
v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:l8QjUfDmZHqyNMTMwrZvIk+xbvZxOkmZjnFkmO9ktl8= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 h1:9VRJ698EFIMfjOQtcjKMM7CWXOIxp9R4I8JA1mk+WT4= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:Bpoj6mDJcLmMExbqoIFoqYYLzIu0bdVf/7XjP5UUs9E= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:21vB+bra7pRWMRlZvgdDkb9FXExRshiUeDhTBvYIuLs= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:FL+PMODrygIxX5lpNEyv1y1GKqFTXa0p/XH31SHpbcw= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:8EoX6i7o6cN9I1KbIpgUBVaY9YFQBRtV3wS68rpEf+s= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:SSZTJkOelKEqXz6mwE1J9aSnZB3G7YROnHOGij32Opk= +go.etcd.io/raft/v3 v3.6.0-alpha.0 h1:cMmjAEjCKMGiQPowjSWM43Y5ZnBEeNP8RSYcm3ewtns= +go.etcd.io/raft/v3 v3.6.0-alpha.0/go.mod h1:QpxpKeYmocQQFHP75LxNrdJTukZmqQig9lotwYLsUJY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -246,21 +246,21 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -271,15 +271,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/cri-api/go.mod b/staging/src/k8s.io/cri-api/go.mod index a6d427d2dcc9d..cbf1c47da0580 100644 --- a/staging/src/k8s.io/cri-api/go.mod +++ b/staging/src/k8s.io/cri-api/go.mod @@ -9,18 +9,18 @@ godebug default=go1.23 require ( github.com/gogo/protobuf v1.3.2 github.com/stretchr/testify v1.9.0 - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.68.0 ) require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/kr/pretty v0.3.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240826202546-f6391c0de4c7 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/cri-api/go.sum b/staging/src/k8s.io/cri-api/go.sum index 8157de0f4321e..a1033c23d1ad2 100644 --- a/staging/src/k8s.io/cri-api/go.sum +++ b/staging/src/k8s.io/cri-api/go.sum @@ -1,16 +1,17 @@ -cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -25,11 +26,12 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal 
v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= @@ -41,42 +43,42 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text 
v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/cri-client/go.mod b/staging/src/k8s.io/cri-client/go.mod index ebff7f52606dc..a9cbd0af8ea8d 100644 --- a/staging/src/k8s.io/cri-client/go.mod +++ b/staging/src/k8s.io/cri-client/go.mod @@ -10,11 +10,11 @@ require ( github.com/Microsoft/go-winio v0.6.2 github.com/fsnotify/fsnotify v1.7.0 github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 - go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 - golang.org/x/sys v0.26.0 - google.golang.org/grpc v1.65.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 + go.opentelemetry.io/otel/sdk v1.32.0 + go.opentelemetry.io/otel/trace v1.32.0 + golang.org/x/sys v0.27.0 + google.golang.org/grpc v1.68.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 @@ -43,34 +43,35 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect 
github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/cri-client/go.sum b/staging/src/k8s.io/cri-client/go.sum index 0f949091505ba..c530fe62d5386 100644 --- a/staging/src/k8s.io/cri-client/go.sum +++ b/staging/src/k8s.io/cri-client/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= @@ -18,7 +18,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto 
v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -27,8 +27,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -54,10 +54,10 @@ github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -71,8 +71,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod 
h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -83,6 +83,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -90,6 +92,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= @@ -107,20 +111,21 @@ github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 
h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= @@ -142,22 +147,22 @@ github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcY github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod 
h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -182,19 +187,19 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -204,13 +209,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/csi-translation-lib/go.mod b/staging/src/k8s.io/csi-translation-lib/go.mod index 6877442616572..278c78ea14543 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.mod +++ b/staging/src/k8s.io/csi-translation-lib/go.mod @@ -27,7 +27,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/text v0.20.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect diff --git a/staging/src/k8s.io/csi-translation-lib/go.sum b/staging/src/k8s.io/csi-translation-lib/go.sum index ce95781b32c4c..35754b3edb6ee 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.sum +++ b/staging/src/k8s.io/csi-translation-lib/go.sum @@ -48,8 +48,8 @@ github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -77,17 +77,17 @@ golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.mod b/staging/src/k8s.io/dynamic-resource-allocation/go.mod index 749e6482d9f08..f3cdcc630cd80 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/go.mod +++ b/staging/src/k8s.io/dynamic-resource-allocation/go.mod @@ -12,7 +12,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/onsi/gomega v1.35.1 github.com/stretchr/testify v1.9.0 - google.golang.org/grpc v1.65.0 + 
 	google.golang.org/grpc v1.68.0
 	k8s.io/api v0.0.0
 	k8s.io/apimachinery v0.0.0
 	k8s.io/apiserver v0.0.0
@@ -44,32 +44,33 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_golang v1.19.1 // indirect
+	github.com/prometheus/client_golang v1.20.5 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.55.0 // indirect
+	github.com/prometheus/common v0.60.1 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/spf13/cobra v1.8.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	go.opentelemetry.io/otel v1.28.0 // indirect
-	go.opentelemetry.io/otel/trace v1.28.0 // indirect
+	go.opentelemetry.io/otel v1.32.0 // indirect
+	go.opentelemetry.io/otel/trace v1.32.0 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
 	golang.org/x/net v0.30.0 // indirect
 	golang.org/x/oauth2 v0.23.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/sync v0.9.0 // indirect
+	golang.org/x/sys v0.27.0 // indirect
 	golang.org/x/term v0.25.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
-	golang.org/x/time v0.7.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
+	golang.org/x/text v0.20.0 // indirect
+	golang.org/x/time v0.8.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
 	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.sum b/staging/src/k8s.io/dynamic-resource-allocation/go.sum
index 058dee2c6239c..346cb9c2f5de9 100644
--- a/staging/src/k8s.io/dynamic-resource-allocation/go.sum
+++ b/staging/src/k8s.io/dynamic-resource-allocation/go.sum
@@ -1,6 +1,6 @@
 cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
 cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
@@ -18,7 +18,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
@@ -31,8 +31,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
@@ -55,11 +55,11 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
 github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -76,10 +76,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
@@ -91,6 +89,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -98,6 +98,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
@@ -118,20 +120,21 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
@@ -158,24 +161,24 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX
 github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
-go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
-go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
-go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
-go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
-go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
-go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
-go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
+go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec=
+go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I=
+go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs=
+go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs=
+go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU=
+go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:FL+PMODrygIxX5lpNEyv1y1GKqFTXa0p/XH31SHpbcw=
+go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:SSZTJkOelKEqXz6mwE1J9aSnZB3G7YROnHOGij32Opk=
+go.etcd.io/raft/v3 v3.6.0-alpha.0/go.mod h1:QpxpKeYmocQQFHP75LxNrdJTukZmqQig9lotwYLsUJY=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
+go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
+go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
+go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
+go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
+go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
@@ -201,21 +204,21 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
 golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -226,14 +229,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
-google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
-google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g=
+google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
+google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
 google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
 google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/staging/src/k8s.io/endpointslice/go.mod b/staging/src/k8s.io/endpointslice/go.mod
index f9066be4d82c6..c7cd365a9c6bf 100644
--- a/staging/src/k8s.io/endpointslice/go.mod
+++ b/staging/src/k8s.io/endpointslice/go.mod
@@ -35,26 +35,28 @@ require (
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_golang v1.19.1 // indirect
+	github.com/prometheus/client_golang v1.20.5 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.55.0 // indirect
+	github.com/prometheus/common v0.60.1 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	go.opentelemetry.io/otel v1.28.0 // indirect
-	go.opentelemetry.io/otel/trace v1.28.0 // indirect
+	go.opentelemetry.io/otel v1.32.0 // indirect
+	go.opentelemetry.io/otel/trace v1.32.0 // indirect
 	golang.org/x/net v0.30.0 // indirect
 	golang.org/x/oauth2 v0.23.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/sys v0.27.0 // indirect
 	golang.org/x/term v0.25.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
-	golang.org/x/time v0.7.0 // indirect
+	golang.org/x/text v0.20.0 // indirect
+	golang.org/x/time v0.8.0 // indirect
 	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/staging/src/k8s.io/endpointslice/go.sum b/staging/src/k8s.io/endpointslice/go.sum
index 886f63594bf7f..44cd6b9470b71 100644
--- a/staging/src/k8s.io/endpointslice/go.sum
+++ b/staging/src/k8s.io/endpointslice/go.sum
@@ -42,7 +42,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -57,7 +57,7 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@@ -67,6 +67,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -74,6 +76,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
@@ -97,16 +101,16 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -126,14 +130,14 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
+go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
+go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
+go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
+go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
+go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
@@ -157,20 +161,20 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
 golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -181,10 +185,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
 google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
 google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/staging/src/k8s.io/externaljwt/go.mod b/staging/src/k8s.io/externaljwt/go.mod
index bec9fb9ac6c8c..e21e3d07934c5 100644
--- a/staging/src/k8s.io/externaljwt/go.mod
+++ b/staging/src/k8s.io/externaljwt/go.mod
@@ -8,13 +8,13 @@ godebug default=go1.23
 
 require (
 	github.com/gogo/protobuf v1.3.2
-	google.golang.org/grpc v1.65.0
+	google.golang.org/grpc v1.68.0
 	google.golang.org/protobuf v1.35.1
 )
 
 require (
 	golang.org/x/net v0.30.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
+	golang.org/x/sys v0.27.0 // indirect
+	golang.org/x/text v0.20.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
 )
diff --git a/staging/src/k8s.io/externaljwt/go.sum b/staging/src/k8s.io/externaljwt/go.sum
index dfcea7ccbb065..2f137cc51808c 100644
--- a/staging/src/k8s.io/externaljwt/go.sum
+++ b/staging/src/k8s.io/externaljwt/go.sum
@@ -1,19 +1,21 @@
-cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -29,21 +31,21 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
 golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
-golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -53,10 +55,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
+google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
 google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
 google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
diff --git a/staging/src/k8s.io/kms/go.mod b/staging/src/k8s.io/kms/go.mod
index 3a53e03534285..4b6e66fd4a57f 100644
--- a/staging/src/k8s.io/kms/go.mod
+++ b/staging/src/k8s.io/kms/go.mod
@@ -8,13 +8,13 @@ godebug default=go1.23
 
 require (
 	github.com/gogo/protobuf v1.3.2
-	google.golang.org/grpc v1.65.0
+	google.golang.org/grpc v1.68.0
 )
 
 require (
 	golang.org/x/net v0.30.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
+	golang.org/x/sys v0.27.0 // indirect
+	golang.org/x/text v0.20.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
 	google.golang.org/protobuf v1.35.1 // indirect
 )
diff --git a/staging/src/k8s.io/kms/go.sum b/staging/src/k8s.io/kms/go.sum
index dfcea7ccbb065..2f137cc51808c 100644
--- a/staging/src/k8s.io/kms/go.sum
+++ b/staging/src/k8s.io/kms/go.sum
@@ -1,19 +1,21 @@
-cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -29,21 +31,21 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
 golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
-golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -53,10 +55,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
+google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
 google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
 google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
diff --git a/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod b/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod
index 24aa2bdf7013e..feea09cfebc29 100644
--- a/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod
+++ b/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod
@@ -15,10 +15,10 @@ require (
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/thales-e-security/pool v0.0.2 // indirect
 	golang.org/x/net v0.30.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
-	google.golang.org/grpc v1.65.0 // indirect
+	golang.org/x/sys v0.27.0 // indirect
+	golang.org/x/text v0.20.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
+	google.golang.org/grpc v1.68.0 // indirect
 	google.golang.org/protobuf v1.35.1 // indirect
 )
diff --git a/staging/src/k8s.io/kms/internal/plugins/_mock/go.sum b/staging/src/k8s.io/kms/internal/plugins/_mock/go.sum
index 3478c7213acec..4e6f071a9b2cf 100644
--- a/staging/src/k8s.io/kms/internal/plugins/_mock/go.sum
+++ b/staging/src/k8s.io/kms/internal/plugins/_mock/go.sum
@@ -4,6 +4,8 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -39,12 +41,12 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -53,9 +55,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
+google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
 google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
 google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod
index 055d0faea2540..50eef7f04a750 100644
--- a/staging/src/k8s.io/kube-aggregator/go.mod
+++ b/staging/src/k8s.io/kube-aggregator/go.mod
@@ -14,9 +14,9 @@ require (
 	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.9.0
-	go.opentelemetry.io/otel v1.28.0
-	go.opentelemetry.io/otel/sdk v1.28.0
-	go.opentelemetry.io/otel/trace v1.28.0
+	go.opentelemetry.io/otel v1.32.0
+	go.opentelemetry.io/otel/sdk v1.32.0
+	go.opentelemetry.io/otel/trace v1.32.0
 	golang.org/x/net v0.30.0
 	k8s.io/api v0.0.0
 	k8s.io/apimachinery v0.0.0
@@ -51,16 +51,18 @@ require (
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/btree v1.0.1 // indirect
+	github.com/google/btree v1.1.3 // indirect
 	github.com/google/cel-go v0.22.0 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/moby/spdystream v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -69,20 +71,20 @@ require (
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_golang v1.19.1 // indirect
+	github.com/prometheus/client_golang v1.20.5 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.55.0 // indirect
+	github.com/prometheus/common v0.60.1 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	go.etcd.io/etcd/api/v3 v3.5.16 // indirect
-	go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
-	go.etcd.io/etcd/client/v3 v3.5.16 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
+	go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect
+	go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
-	go.opentelemetry.io/otel/metric v1.28.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
+	go.opentelemetry.io/otel/metric v1.32.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
@@ -90,15 +92,15 @@ require (
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
 	golang.org/x/mod v0.21.0 // indirect
 	golang.org/x/oauth2 v0.23.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/sync v0.9.0 // indirect
+	golang.org/x/sys v0.27.0 // indirect
 	golang.org/x/term v0.25.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
-	golang.org/x/time v0.7.0 // indirect
+	golang.org/x/text v0.20.0 // indirect
+	golang.org/x/time v0.8.0 // indirect
 	golang.org/x/tools v0.26.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
-	google.golang.org/grpc v1.65.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
+	google.golang.org/grpc v1.68.0 // indirect
 	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum
index 77d538064ab84..0135740cb4279 100644
--- a/staging/src/k8s.io/kube-aggregator/go.sum
+++ b/staging/src/k8s.io/kube-aggregator/go.sum
@@ -1,6 +1,6 @@
 cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
 cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
@@ -22,7 +22,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
 github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
@@ -38,8 +38,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -68,13 +68,13 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
 github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -92,14 +92,10 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
@@ -112,6 +108,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -119,6 +117,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -142,21 +142,22 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -188,41 +189,40 @@ github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
-go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
-go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
-go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
-go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
-go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
-go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
-go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
-go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
-go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
-go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
-go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
-go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
-go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
-go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
-go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
+go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM=
+go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec=
+go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:uuyUQSC6O58OfQH7KlKhmIbHjG/l+lTqyq6Y5vXOHcs=
+go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I=
+go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:l8QjUfDmZHqyNMTMwrZvIk+xbvZxOkmZjnFkmO9ktl8=
+go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs=
+go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 h1:9VRJ698EFIMfjOQtcjKMM7CWXOIxp9R4I8JA1mk+WT4=
+go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs=
+go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:Bpoj6mDJcLmMExbqoIFoqYYLzIu0bdVf/7XjP5UUs9E=
+go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU=
+go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:21vB+bra7pRWMRlZvgdDkb9FXExRshiUeDhTBvYIuLs=
+go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:FL+PMODrygIxX5lpNEyv1y1GKqFTXa0p/XH31SHpbcw=
+go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd
h1:8EoX6i7o6cN9I1KbIpgUBVaY9YFQBRtV3wS68rpEf+s= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:SSZTJkOelKEqXz6mwE1J9aSnZB3G7YROnHOGij32Opk= +go.etcd.io/raft/v3 v3.6.0-alpha.0 h1:cMmjAEjCKMGiQPowjSWM43Y5ZnBEeNP8RSYcm3ewtns= +go.etcd.io/raft/v3 v3.6.0-alpha.0/go.mod h1:QpxpKeYmocQQFHP75LxNrdJTukZmqQig9lotwYLsUJY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -251,22 +251,22 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -277,15 +277,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 
h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod index 32e8d8eb68780..75f25df83c911 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.mod +++ b/staging/src/k8s.io/kube-controller-manager/go.mod @@ -21,10 +21,9 @@ require ( github.com/kr/pretty v0.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/text v0.20.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/component-base v0.0.0 // indirect diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum index 3c66da350b101..d2772b259dc05 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/staging/src/k8s.io/kube-controller-manager/go.sum @@ -31,7 +31,7 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= 
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -43,13 +43,14 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -57,6 +58,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= @@ -74,13 +76,13 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= 
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -93,17 +95,17 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= -go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= -go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= @@ -125,17 +127,17 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -145,9 +147,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/staging/src/k8s.io/kube-proxy/go.mod b/staging/src/k8s.io/kube-proxy/go.mod index 
b30d87d28d39a..b9cf8d1e5fc0e 100644 --- a/staging/src/k8s.io/kube-proxy/go.mod +++ b/staging/src/k8s.io/kube-proxy/go.mod @@ -22,23 +22,23 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kr/text v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum index 5e15223a67a35..a2665ed64204d 100644 --- a/staging/src/k8s.io/kube-proxy/go.sum +++ b/staging/src/k8s.io/kube-proxy/go.sum @@ -41,7 +41,7 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -51,10 +51,14 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= @@ -73,16 +77,16 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= @@ -98,14 +102,14 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0/go.mod 
h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= @@ -126,18 +130,18 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -147,10 +151,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/kube-scheduler/go.mod b/staging/src/k8s.io/kube-scheduler/go.mod index 711ee2adbc1ca..a6d04e3b47d77 100644 --- a/staging/src/k8s.io/kube-scheduler/go.mod +++ b/staging/src/k8s.io/kube-scheduler/go.mod @@ -25,7 +25,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/text v0.20.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum index 56805d17234f2..d18762e6b24d2 100644 --- a/staging/src/k8s.io/kube-scheduler/go.sum +++ b/staging/src/k8s.io/kube-scheduler/go.sum @@ -33,17 +33,19 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= @@ -60,12 +62,12 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -78,12 +80,12 @@ github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcY github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= @@ -104,17 +106,17 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -124,9 +126,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod 
h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/staging/src/k8s.io/kubectl/go.mod b/staging/src/k8s.io/kubectl/go.mod index ad79bf5dd8985..4b06dcbe829e7 100644 --- a/staging/src/k8s.io/kubectl/go.mod +++ b/staging/src/k8s.io/kubectl/go.mod @@ -27,7 +27,7 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 - golang.org/x/sys v0.26.0 + golang.org/x/sys v0.27.0 gopkg.in/evanphx/json-patch.v4 v4.12.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 @@ -59,7 +59,7 @@ require ( github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect @@ -83,10 +83,10 @@ require ( github.com/xlab/treeprint v1.2.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect + golang.org/x/sync v0.9.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect golang.org/x/tools v0.26.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/staging/src/k8s.io/kubectl/go.sum b/staging/src/k8s.io/kubectl/go.sum index f7008ed6bba77..0133f9992cc5e 100644 --- a/staging/src/k8s.io/kubectl/go.sum +++ b/staging/src/k8s.io/kubectl/go.sum @@ -54,8 +54,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -74,7 +74,7 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -86,6 +86,7 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -93,6 +94,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= @@ -129,12 +131,12 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= @@ -164,12 +166,12 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -193,23 +195,23 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -220,9 +222,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/kubelet/go.mod b/staging/src/k8s.io/kubelet/go.mod index e77380dcd7ce7..e1c24bee878d7 100644 --- a/staging/src/k8s.io/kubelet/go.mod +++ b/staging/src/k8s.io/kubelet/go.mod @@ -10,7 +10,7 @@ require ( github.com/emicklei/go-restful/v3 v3.11.0 github.com/gogo/protobuf v1.3.2 github.com/stretchr/testify v1.9.0 - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.68.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/apiserver v0.0.0 @@ -33,6 +33,7 @@ require ( github.com/gorilla/websocket v1.5.0 // 
indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kr/text v0.2.0 // indirect github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -40,22 +41,22 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum index 789ef1b188873..7358aca06fc3d 100644 --- a/staging/src/k8s.io/kubelet/go.sum +++ b/staging/src/k8s.io/kubelet/go.sum @@ -1,5 +1,5 @@ cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= @@ -16,7 +16,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0/go.mod 
h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -29,8 +29,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -51,11 +51,11 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= @@ -72,10 +72,8 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= @@ -87,10 +85,14 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= @@ -112,20 +114,21 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 
h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= @@ -146,24 +149,24 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= -go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:FL+PMODrygIxX5lpNEyv1y1GKqFTXa0p/XH31SHpbcw= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:SSZTJkOelKEqXz6mwE1J9aSnZB3G7YROnHOGij32Opk= +go.etcd.io/raft/v3 v3.6.0-alpha.0/go.mod h1:QpxpKeYmocQQFHP75LxNrdJTukZmqQig9lotwYLsUJY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= 
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -187,20 +190,20 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -211,13 +214,11 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/metrics/go.mod b/staging/src/k8s.io/metrics/go.mod index 8430be7c914b7..b6ff460aeabba 100644 --- a/staging/src/k8s.io/metrics/go.mod +++ b/staging/src/k8s.io/metrics/go.mod @@ -41,11 +41,11 @@ require ( golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect golang.org/x/tools v0.26.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect diff --git a/staging/src/k8s.io/metrics/go.sum b/staging/src/k8s.io/metrics/go.sum index 15be7434f3223..5eac459ec4f57 100644 --- a/staging/src/k8s.io/metrics/go.sum +++ b/staging/src/k8s.io/metrics/go.sum @@ -27,7 +27,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -76,8 +76,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -115,22 +115,22 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text 
v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/staging/src/k8s.io/mount-utils/go.mod b/staging/src/k8s.io/mount-utils/go.mod index 2ddb06e500d66..345dcab50aa9e 100644 --- a/staging/src/k8s.io/mount-utils/go.mod +++ b/staging/src/k8s.io/mount-utils/go.mod @@ -11,7 +11,7 @@ require ( github.com/moby/sys/userns v0.1.0 github.com/stretchr/testify v1.9.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sys v0.26.0 + golang.org/x/sys v0.27.0 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 ) @@ -21,7 +21,7 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/staging/src/k8s.io/mount-utils/go.sum b/staging/src/k8s.io/mount-utils/go.sum index a515ab4a6ac2c..f72e0dfe248ba 100644 --- a/staging/src/k8s.io/mount-utils/go.sum +++ b/staging/src/k8s.io/mount-utils/go.sum @@ -19,8 +19,8 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= @@ -28,8 +28,8 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/staging/src/k8s.io/pod-security-admission/go.mod b/staging/src/k8s.io/pod-security-admission/go.mod index 82851be26b824..b2adf424f6469 100644 --- a/staging/src/k8s.io/pod-security-admission/go.mod +++ b/staging/src/k8s.io/pod-security-admission/go.mod @@ -45,39 +45,41 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.22.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect - go.etcd.io/etcd/api/v3 v3.5.16 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect - go.etcd.io/etcd/client/v3 v3.5.16 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect @@ -85,14 +87,14 @@ require ( golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/grpc v1.68.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/staging/src/k8s.io/pod-security-admission/go.sum b/staging/src/k8s.io/pod-security-admission/go.sum index 723f969914195..b1b54ba9a2a19 100644 --- a/staging/src/k8s.io/pod-security-admission/go.sum +++ b/staging/src/k8s.io/pod-security-admission/go.sum @@ -1,6 +1,6 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= @@ -21,7 +21,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -37,8 +37,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -67,13 +67,13 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -91,14 +91,10 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -111,6 +107,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -118,6 +116,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -139,21 +139,22 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod 
h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -184,41 +185,40 @@ github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chq github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= -go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= -go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= -go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= -go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= -go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= -go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= -go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.etcd.io/bbolt 
v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM= +go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:uuyUQSC6O58OfQH7KlKhmIbHjG/l+lTqyq6Y5vXOHcs= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:l8QjUfDmZHqyNMTMwrZvIk+xbvZxOkmZjnFkmO9ktl8= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 h1:9VRJ698EFIMfjOQtcjKMM7CWXOIxp9R4I8JA1mk+WT4= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:Bpoj6mDJcLmMExbqoIFoqYYLzIu0bdVf/7XjP5UUs9E= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:21vB+bra7pRWMRlZvgdDkb9FXExRshiUeDhTBvYIuLs= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:FL+PMODrygIxX5lpNEyv1y1GKqFTXa0p/XH31SHpbcw= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:8EoX6i7o6cN9I1KbIpgUBVaY9YFQBRtV3wS68rpEf+s= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:SSZTJkOelKEqXz6mwE1J9aSnZB3G7YROnHOGij32Opk= +go.etcd.io/raft/v3 v3.6.0-alpha.0 h1:cMmjAEjCKMGiQPowjSWM43Y5ZnBEeNP8RSYcm3ewtns= +go.etcd.io/raft/v3 v3.6.0-alpha.0/go.mod h1:QpxpKeYmocQQFHP75LxNrdJTukZmqQig9lotwYLsUJY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= 
+go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -246,21 +246,21 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -271,15 +271,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod index 3b43f7be94a1e..415c1b989dcc6 100644 --- a/staging/src/k8s.io/sample-apiserver/go.mod +++ b/staging/src/k8s.io/sample-apiserver/go.mod @@ -43,40 +43,42 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.0.1 // indirect + 
github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.22.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect - go.etcd.io/etcd/api/v3 v3.5.16 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect - go.etcd.io/etcd/client/v3 v3.5.16 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect @@ -85,15 +87,15 @@ require ( golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect golang.org/x/tools v0.26.0 // 
indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/grpc v1.68.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index 32e5c2b5f30e8..5ee5e25c289b0 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ b/staging/src/k8s.io/sample-apiserver/go.sum @@ -1,6 +1,6 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= @@ -21,7 +21,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -37,8 +37,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -67,13 +67,13 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ 
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -91,14 +91,10 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= @@ -111,6 +107,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -118,6 +116,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -139,21 +139,22 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal 
v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -185,41 +186,40 @@ github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= -go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= -go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= -go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= -go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= -go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= -go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= -go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.etcd.io/bbolt v1.4.0-beta.0 h1:U7Y9yH6ZojEo5/BDFMXDXD1RNx9L7iKxudzqR68jLaM= +go.etcd.io/bbolt v1.4.0-beta.0/go.mod h1:Qv5yHB6jkQESXT/uVfxJgUPMqgAyhL0GLxcQaz9bSec= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:uuyUQSC6O58OfQH7KlKhmIbHjG/l+lTqyq6Y5vXOHcs= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:kiyBidVS9nGwfQcXcXqUndUJkMBcp0AdZ7sXmwSMD2I= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:l8QjUfDmZHqyNMTMwrZvIk+xbvZxOkmZjnFkmO9ktl8= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5B0xZc6aoiWt7n0DYA7s3MrhTRfmp07hY7b/TykJQAs= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 h1:9VRJ698EFIMfjOQtcjKMM7CWXOIxp9R4I8JA1mk+WT4= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:Bpoj6mDJcLmMExbqoIFoqYYLzIu0bdVf/7XjP5UUs9E= +go.etcd.io/etcd/client/v3 
v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:5b6FTfh+1atULPdGeukvpK3u5N8GMskQpb0Z9klI6sU= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:21vB+bra7pRWMRlZvgdDkb9FXExRshiUeDhTBvYIuLs= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:FL+PMODrygIxX5lpNEyv1y1GKqFTXa0p/XH31SHpbcw= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd h1:8EoX6i7o6cN9I1KbIpgUBVaY9YFQBRtV3wS68rpEf+s= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd/go.mod h1:SSZTJkOelKEqXz6mwE1J9aSnZB3G7YROnHOGij32Opk= +go.etcd.io/raft/v3 v3.6.0-alpha.0 h1:cMmjAEjCKMGiQPowjSWM43Y5ZnBEeNP8RSYcm3ewtns= +go.etcd.io/raft/v3 v3.6.0-alpha.0/go.mod h1:QpxpKeYmocQQFHP75LxNrdJTukZmqQig9lotwYLsUJY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 
h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -248,22 +248,22 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -274,15 +274,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/staging/src/k8s.io/sample-cli-plugin/go.mod b/staging/src/k8s.io/sample-cli-plugin/go.mod index 62e6172c25808..813e5d7642533 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.mod +++ b/staging/src/k8s.io/sample-cli-plugin/go.mod @@ -26,7 +26,7 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect @@ -49,11 +49,11 @@ require ( github.com/xlab/treeprint v1.2.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git 
a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum index fdd3895bceb06..07c2c7fa9ecd6 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.sum +++ b/staging/src/k8s.io/sample-cli-plugin/go.sum @@ -36,8 +36,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -98,8 +98,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -145,22 +145,22 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/staging/src/k8s.io/sample-controller/go.mod b/staging/src/k8s.io/sample-controller/go.mod index 5478a788e0e19..d7a6586caf758 100644 --- a/staging/src/k8s.io/sample-controller/go.mod +++ b/staging/src/k8s.io/sample-controller/go.mod @@ -7,7 +7,7 @@ go 1.23.0 godebug default=go1.23 require ( - golang.org/x/time v0.7.0 + golang.org/x/time v0.8.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 @@ -41,10 +41,10 @@ require ( golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/text v0.20.0 // indirect golang.org/x/tools v0.26.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect diff --git a/staging/src/k8s.io/sample-controller/go.sum b/staging/src/k8s.io/sample-controller/go.sum index a50a01ece01dd..fd039feb9c8b1 100644 --- a/staging/src/k8s.io/sample-controller/go.sum +++ b/staging/src/k8s.io/sample-controller/go.sum @@ -27,7 +27,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -76,8 +76,8 @@ github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -115,22 +115,22 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go index c0a6f6927917a..9dd36e5a5acd4 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -36,19 +36,21 @@ func NewParser(options ...ParserOption) *Parser { return p } -// Parse parses, validates, verifies the signature and returns the parsed token. -// keyFunc will receive the parsed token and should return the key for validating. +// Parse parses, validates, verifies the signature and returns the parsed token. keyFunc will +// receive the parsed token and should return the key for validating. func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) } -// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims -// interface. This provides default values which can be overridden and allows a caller to use their own type, rather -// than the default MapClaims implementation of Claims. +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object +// implementing the Claims interface. This provides default values which can be overridden and +// allows a caller to use their own type, rather than the default MapClaims implementation of +// Claims. // -// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), -// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the -// proper memory for it before passing in the overall claims, otherwise you might run into a panic. +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such +// as RegisteredClaims), make sure that a) you either embed a non-pointer version of the claims or +// b) if you are using a pointer, allocate the proper memory for it before passing in the overall +// claims, otherwise you might run into a panic. 
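The ParseWithClaims doc comment above, and the hunks just below it, come from the golang-jwt v4.5.0 to v4.5.1 bump elsewhere in this change: signature verification now runs before claims validation, so a token with a forged signature fails with ValidationErrorSignatureInvalid before its claims are ever inspected, and invalid claims now return early instead of still reaching the signature check. A minimal caller-side sketch of this API, assuming an HMAC-signed token (the token string and key are placeholders; MapClaims stands in for any Claims implementation):

    package example

    import (
        "fmt"

        "github.com/golang-jwt/jwt/v4"
    )

    func verifyToken(tokenString string, key []byte) error {
        // MapClaims is the default; a custom struct embedding a non-pointer
        // jwt.RegisteredClaims (per the note above) works the same way.
        claims := jwt.MapClaims{}
        token, err := jwt.NewParser().ParseWithClaims(tokenString, claims, func(t *jwt.Token) (interface{}, error) {
            // Reject unexpected signing methods before handing back the key.
            if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
                return nil, fmt.Errorf("unexpected signing method %v", t.Header["alg"])
            }
            return key, nil
        })
        if err != nil {
            // After the reordering in the hunks below, a bad signature
            // surfaces here as a *jwt.ValidationError with
            // ValidationErrorSignatureInvalid set, before claim checks run.
            return err
        }
        fmt.Println("valid:", token.Valid) // true on success
        return nil
    }
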
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { @@ -85,12 +87,17 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} } + // Perform validation + token.Signature = parts[2] + if err := token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorSignatureInvalid} + } + vErr := &ValidationError{} // Validate Claims if !p.SkipClaimsValidation { if err := token.Claims.Valid(); err != nil { - // If the Claims Valid returned an error, check if it is a validation error, // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set if e, ok := err.(*ValidationError); !ok { @@ -98,22 +105,14 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } else { vErr = e } + return token, vErr } } - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } + // No errors so far, token is valid. + token.Valid = true - return token, vErr + return token, nil } // ParseUnverified parses the token but doesn't validate the signature. diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go deleted file mode 100644 index ffde8a65081e4..0000000000000 --- a/vendor/github.com/golang/protobuf/descriptor/descriptor.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package descriptor provides functions for obtaining the protocol buffer -// descriptors of generated Go types. -// -// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package -// for how to obtain an EnumDescriptor or MessageDescriptor in order to -// programatically interact with the protobuf type system. -package descriptor - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "sync" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoimpl" - - descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor" -) - -// Message is proto.Message with a method to return its descriptor. -// -// Deprecated: The Descriptor method may not be generated by future -// versions of protoc-gen-go, meaning that this interface may not -// be implemented by many concrete message types. -type Message interface { - proto.Message - Descriptor() ([]byte, []int) -} - -// ForMessage returns the file descriptor proto containing -// the message and the message descriptor proto for the message itself. -// The returned proto messages must not be mutated. -// -// Deprecated: Not all concrete message types satisfy the Message interface. -// Use MessageDescriptorProto instead. If possible, the calling code should -// be rewritten to use protobuf reflection instead. -// See package "google.golang.org/protobuf/reflect/protoreflect" for details. 
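The deprecation notices in this deleted descriptor package all point callers at protobuf reflection. A minimal sketch of the v2 replacement flow, using durationpb.Duration purely as a stand-in generated message (any v2-generated type works the same way):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protodesc"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Every v2-generated message exposes its descriptor via ProtoReflect().
        md := (&durationpb.Duration{}).ProtoReflect().Descriptor()

        // protodesc converts back to a FileDescriptorProto when the proto
        // form is needed, covering what MessageDescriptorProto returned here.
        fd := protodesc.ToFileDescriptorProto(md.ParentFile())
        fmt.Println(md.FullName(), "declared in", fd.GetName())
    }
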
-func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { - return MessageDescriptorProto(m) -} - -type rawDesc struct { - fileDesc []byte - indexes []int -} - -var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc - -func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) { - // Fast-path: check whether raw descriptors are already cached. - origDesc := d - if v, ok := rawDescCache.Load(origDesc); ok { - return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes - } - - // Slow-path: derive the raw descriptor from the v2 descriptor. - - // Start with the leaf (a given enum or message declaration) and - // ascend upwards until we hit the parent file descriptor. - var idxs []int - for { - idxs = append(idxs, d.Index()) - d = d.Parent() - if d == nil { - // TODO: We could construct a FileDescriptor stub for standalone - // descriptors to satisfy the API. - return nil, nil - } - if _, ok := d.(protoreflect.FileDescriptor); ok { - break - } - } - - // Obtain the raw file descriptor. - fd := d.(protoreflect.FileDescriptor) - b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd)) - file := protoimpl.X.CompressGZIP(b) - - // Reverse the indexes, since we populated it in reverse. - for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 { - idxs[i], idxs[j] = idxs[j], idxs[i] - } - - if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok { - return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes - } - return file, idxs -} - -// EnumRawDescriptor returns the GZIP'd raw file descriptor representing -// the enum and the index path to reach the enum declaration. -// The returned slices must not be mutated. -func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) { - if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok { - return ev.EnumDescriptor() - } - ed := protoimpl.X.EnumTypeOf(e) - return deriveRawDescriptor(ed.Descriptor()) -} - -// MessageRawDescriptor returns the GZIP'd raw file descriptor representing -// the message and the index path to reach the message declaration. -// The returned slices must not be mutated. -func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) { - if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok { - return mv.Descriptor() - } - md := protoimpl.X.MessageTypeOf(m) - return deriveRawDescriptor(md.Descriptor()) -} - -var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto - -func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto { - // Fast-path: check whether descriptor protos are already cached. - if v, ok := fileDescCache.Load(&rawDesc[0]); ok { - return v.(*descriptorpb.FileDescriptorProto) - } - - // Slow-path: derive the descriptor proto from the GZIP'd message. - zr, err := gzip.NewReader(bytes.NewReader(rawDesc)) - if err != nil { - panic(err) - } - b, err := ioutil.ReadAll(zr) - if err != nil { - panic(err) - } - fd := new(descriptorpb.FileDescriptorProto) - if err := proto.Unmarshal(b, fd); err != nil { - panic(err) - } - if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok { - return v.(*descriptorpb.FileDescriptorProto) - } - return fd -} - -// EnumDescriptorProto returns the file descriptor proto representing -// the enum and the enum descriptor proto for the enum itself. -// The returned proto messages must not be mutated. 
-func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) { - rawDesc, idxs := EnumRawDescriptor(e) - if rawDesc == nil || idxs == nil { - return nil, nil - } - fd := deriveFileDescriptor(rawDesc) - if len(idxs) == 1 { - return fd, fd.EnumType[idxs[0]] - } - md := fd.MessageType[idxs[0]] - for _, i := range idxs[1 : len(idxs)-1] { - md = md.NestedType[i] - } - ed := md.EnumType[idxs[len(idxs)-1]] - return fd, ed -} - -// MessageDescriptorProto returns the file descriptor proto representing -// the message and the message descriptor proto for the message itself. -// The returned proto messages must not be mutated. -func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { - rawDesc, idxs := MessageRawDescriptor(m) - if rawDesc == nil || idxs == nil { - return nil, nil - } - fd := deriveFileDescriptor(rawDesc) - md := fd.MessageType[idxs[0]] - for _, i := range idxs[1:] { - md = md.NestedType[i] - } - return fd, md -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go deleted file mode 100644 index c6f66f10393d2..0000000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package jsonpb - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapJSONUnmarshalV2 = false - -// UnmarshalNext unmarshals the next JSON object from d into m. -func UnmarshalNext(d *json.Decoder, m proto.Message) error { - return new(Unmarshaler).UnmarshalNext(d, m) -} - -// Unmarshal unmarshals a JSON object from r into m. -func Unmarshal(r io.Reader, m proto.Message) error { - return new(Unmarshaler).Unmarshal(r, m) -} - -// UnmarshalString unmarshals a JSON object from s into m. -func UnmarshalString(s string, m proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // AllowUnknownFields specifies whether to allow messages to contain - // unknown JSON fields, as opposed to failing to unmarshal. - AllowUnknownFields bool - - // AnyResolver is used to resolve the google.protobuf.Any well-known type. - // If unset, the global registry is used by default. - AnyResolver AnyResolver -} - -// JSONPBUnmarshaler is implemented by protobuf messages that customize the way -// they are unmarshaled from JSON. Messages that implement this should also -// implement JSONPBMarshaler so that the custom format can be produced. -// -// The JSON unmarshaling must follow the JSON to proto specification: -// -// https://developers.google.com/protocol-buffers/docs/proto3#json -// -// Deprecated: Custom types should implement protobuf reflection instead. -type JSONPBUnmarshaler interface { - UnmarshalJSONPB(*Unmarshaler, []byte) error -} - -// Unmarshal unmarshals a JSON object from r into m. 
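The deleted Unmarshaler above is a thin shim over the v2 JSON codec: further down in this file it builds protojson.UnmarshalOptions with DiscardUnknown mirroring AllowUnknownFields. A minimal sketch of calling protojson directly, using structpb.Struct as a stand-in for any generated message:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        // DiscardUnknown plays the role of jsonpb's AllowUnknownFields.
        opts := protojson.UnmarshalOptions{DiscardUnknown: true}

        msg := &structpb.Struct{}
        if err := opts.Unmarshal([]byte(`{"greeting": "hello"}`), msg); err != nil {
            panic(err)
        }

        // Marshal round-trips the message back to canonical proto3 JSON.
        out, err := protojson.Marshal(msg)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }
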
-func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { - return u.UnmarshalNext(json.NewDecoder(r), m) -} - -// UnmarshalNext unmarshals the next JSON object from d into m. -func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { - if m == nil { - return errors.New("invalid nil message") - } - - // Parse the next JSON object from the stream. - raw := json.RawMessage{} - if err := d.Decode(&raw); err != nil { - return err - } - - // Check for custom unmarshalers first since they may not properly - // implement protobuf reflection that the logic below relies on. - if jsu, ok := m.(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, raw) - } - - mr := proto.MessageReflect(m) - - // NOTE: For historical reasons, a top-level null is treated as a noop. - // This is incorrect, but kept for compatibility. - if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { - return nil - } - - if wrapJSONUnmarshalV2 { - // NOTE: If input message is non-empty, we need to preserve merge semantics - // of the old jsonpb implementation. These semantics are not supported by - // the protobuf JSON specification. - isEmpty := true - mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { - isEmpty = false // at least one iteration implies non-empty - return false - }) - if !isEmpty { - // Perform unmarshaling into a newly allocated, empty message. - mr = mr.New() - - // Use a defer to copy all unmarshaled fields into the original message. - dst := proto.MessageReflect(m) - defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - dst.Set(fd, v) - return true - }) - } - - // Unmarshal using the v2 JSON unmarshaler. - opts := protojson.UnmarshalOptions{ - DiscardUnknown: u.AllowUnknownFields, - } - if u.AnyResolver != nil { - opts.Resolver = anyResolver{u.AnyResolver} - } - return opts.Unmarshal(raw, mr.Interface()) - } else { - if err := u.unmarshalMessage(mr, raw); err != nil { - return err - } - return protoV2.CheckInitialized(mr.Interface()) - } -} - -func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { - md := m.Descriptor() - fds := md.Fields() - - if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, in) - } - - if string(in) == "null" && md.FullName() != "google.protobuf.Value" { - return nil - } - - switch wellKnownType(md.FullName()) { - case "Any": - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return err - } - - rawTypeURL, ok := jsonObject["@type"] - if !ok { - return errors.New("Any JSON doesn't have '@type'") - } - typeURL, err := unquoteString(string(rawTypeURL)) - if err != nil { - return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) - } - m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) - - var m2 protoreflect.Message - if u.AnyResolver != nil { - mi, err := u.AnyResolver.Resolve(typeURL) - if err != nil { - return err - } - m2 = proto.MessageReflect(mi) - } else { - mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) - if err != nil { - if err == protoregistry.NotFound { - return fmt.Errorf("could not resolve Any message type: %v", typeURL) - } - return err - } - m2 = mt.New() - } - - if wellKnownType(m2.Descriptor().FullName()) != "" { - rawValue, ok := jsonObject["value"] - if !ok { - return errors.New("Any JSON doesn't have 'value'") - } - if err := u.unmarshalMessage(m2, rawValue); err != nil { - return 
fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) - } - } else { - delete(jsonObject, "@type") - rawJSON, err := json.Marshal(jsonObject) - if err != nil { - return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) - } - if err = u.unmarshalMessage(m2, rawJSON); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) - } - } - - rawWire, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) - } - m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) - return nil - case "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue": - fd := fds.ByNumber(1) - v, err := u.unmarshalValue(m.NewField(fd), in, fd) - if err != nil { - return err - } - m.Set(fd, v) - return nil - case "Duration": - v, err := unquoteString(string(in)) - if err != nil { - return err - } - d, err := time.ParseDuration(v) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - sec := d.Nanoseconds() / 1e9 - nsec := d.Nanoseconds() % 1e9 - m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) - m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) - return nil - case "Timestamp": - v, err := unquoteString(string(in)) - if err != nil { - return err - } - t, err := time.Parse(time.RFC3339Nano, v) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - - sec := t.Unix() - nsec := t.Nanosecond() - m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) - m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) - return nil - case "Value": - switch { - case string(in) == "null": - m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) - case string(in) == "true": - m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) - case string(in) == "false": - m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) - case hasPrefixAndSuffix('"', in, '"'): - s, err := unquoteString(string(in)) - if err != nil { - return fmt.Errorf("unrecognized type for Value %q", in) - } - m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) - case hasPrefixAndSuffix('[', in, ']'): - v := m.Mutable(fds.ByNumber(6)) - return u.unmarshalMessage(v.Message(), in) - case hasPrefixAndSuffix('{', in, '}'): - v := m.Mutable(fds.ByNumber(5)) - return u.unmarshalMessage(v.Message(), in) - default: - f, err := strconv.ParseFloat(string(in), 0) - if err != nil { - return fmt.Errorf("unrecognized type for Value %q", in) - } - m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) - } - return nil - case "ListValue": - var jsonArray []json.RawMessage - if err := json.Unmarshal(in, &jsonArray); err != nil { - return fmt.Errorf("bad ListValue: %v", err) - } - - lv := m.Mutable(fds.ByNumber(1)).List() - for _, raw := range jsonArray { - ve := lv.NewElement() - if err := u.unmarshalMessage(ve.Message(), raw); err != nil { - return err - } - lv.Append(ve) - } - return nil - case "Struct": - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return fmt.Errorf("bad StructValue: %v", err) - } - - mv := m.Mutable(fds.ByNumber(1)).Map() - for key, raw := range jsonObject { - kv := protoreflect.ValueOf(key).MapKey() - vv := mv.NewValue() - if err := u.unmarshalMessage(vv.Message(), raw); err != nil { - return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) - } - mv.Set(kv, vv) - } - return nil - } - - var jsonObject 
map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return err - } - - // Handle known fields. - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - if fd.IsWeak() && fd.Message().IsPlaceholder() { - continue // weak reference is not linked in - } - - // Search for any raw JSON value associated with this field. - var raw json.RawMessage - name := string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { - name = string(fd.Message().Name()) - } - if v, ok := jsonObject[name]; ok { - delete(jsonObject, name) - raw = v - } - name = string(fd.JSONName()) - if v, ok := jsonObject[name]; ok { - delete(jsonObject, name) - raw = v - } - - field := m.NewField(fd) - // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { - continue - } - v, err := u.unmarshalValue(field, raw, fd) - if err != nil { - return err - } - m.Set(fd, v) - } - - // Handle extension fields. - for name, raw := range jsonObject { - if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { - continue - } - - // Resolve the extension field by name. - xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(md) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - continue - } - delete(jsonObject, name) - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) - } - - field := m.NewField(fd) - // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { - continue - } - v, err := u.unmarshalValue(field, raw, fd) - if err != nil { - return err - } - m.Set(fd, v) - } - - if !u.AllowUnknownFields && len(jsonObject) > 0 { - for name := range jsonObject { - return fmt.Errorf("unknown field %q in %v", name, md.FullName()) - } - } - return nil -} - -func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { - if fd.Cardinality() == protoreflect.Repeated { - return false - } - if md := fd.Message(); md != nil { - return md.FullName() == "google.protobuf.Value" - } - if ed := fd.Enum(); ed != nil { - return ed.FullName() == "google.protobuf.NullValue" - } - return false -} - -func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { - if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { - _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) - return ok - } - return false -} - -func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - switch { - case fd.IsList(): - var jsonArray []json.RawMessage - if err := json.Unmarshal(in, &jsonArray); err != nil { - return v, err - } - lv := v.List() - for _, raw := range jsonArray { - ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) - if err != nil { - return v, err - } - lv.Append(ve) - } - return v, nil - case fd.IsMap(): - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return v, err - } - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := v.Map() - for key, raw := range jsonObject { - var kv protoreflect.MapKey - if kfd.Kind() == 
protoreflect.StringKind { - kv = protoreflect.ValueOf(key).MapKey() - } else { - v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) - if err != nil { - return v, err - } - kv = v.MapKey() - } - - vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) - if err != nil { - return v, err - } - mv.Set(kv, vv) - } - return v, nil - default: - return u.unmarshalSingularValue(v, in, fd) - } -} - -var nonFinite = map[string]float64{ - `"NaN"`: math.NaN(), - `"Infinity"`: math.Inf(+1), - `"-Infinity"`: math.Inf(-1), -} - -func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - switch fd.Kind() { - case protoreflect.BoolKind: - return unmarshalValue(in, new(bool)) - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - return unmarshalValue(trimQuote(in), new(int32)) - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return unmarshalValue(trimQuote(in), new(int64)) - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - return unmarshalValue(trimQuote(in), new(uint32)) - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return unmarshalValue(trimQuote(in), new(uint64)) - case protoreflect.FloatKind: - if f, ok := nonFinite[string(in)]; ok { - return protoreflect.ValueOfFloat32(float32(f)), nil - } - return unmarshalValue(trimQuote(in), new(float32)) - case protoreflect.DoubleKind: - if f, ok := nonFinite[string(in)]; ok { - return protoreflect.ValueOfFloat64(float64(f)), nil - } - return unmarshalValue(trimQuote(in), new(float64)) - case protoreflect.StringKind: - return unmarshalValue(in, new(string)) - case protoreflect.BytesKind: - return unmarshalValue(in, new([]byte)) - case protoreflect.EnumKind: - if hasPrefixAndSuffix('"', in, '"') { - vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) - if vd == nil { - return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) - } - return protoreflect.ValueOfEnum(vd.Number()), nil - } - return unmarshalValue(in, new(protoreflect.EnumNumber)) - case protoreflect.MessageKind, protoreflect.GroupKind: - err := u.unmarshalMessage(v.Message(), in) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } -} - -func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { - err := json.Unmarshal(in, v) - return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err -} - -func unquoteString(in string) (out string, err error) { - err = json.Unmarshal([]byte(in), &out) - return out, err -} - -func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { - if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { - return true - } - return false -} - -// trimQuote is like unquoteString but simply strips surrounding quotes. -// This is incorrect, but is behavior done by the legacy implementation. -func trimQuote(in []byte) []byte { - if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { - in = in[1 : len(in)-1] - } - return in -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go deleted file mode 100644 index e9438a93f331c..0000000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
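[Editor's note: the decoder above preserves several legacy jsonpb quirks: 64-bit integers and enums may arrive quoted (trimQuote), non-finite floats are spelled "NaN"/"Infinity", and well-known types such as Duration use their JSON string forms. A minimal, illustrative sketch of driving this decoder — not part of the vendored file, and assuming the durationpb well-known type from google.golang.org/protobuf is available alongside the vendored jsonpb package:

package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// "1.5s" exercises the Duration branch of unmarshalMessage above: the
	// quoted string is parsed with time.ParseDuration and split into the
	// seconds and nanos fields.
	d := &durationpb.Duration{}
	u := &jsonpb.Unmarshaler{}
	if err := u.Unmarshal(strings.NewReader(`"1.5s"`), d); err != nil {
		panic(err)
	}
	fmt.Println(d.Seconds, d.Nanos) // 1 500000000
}

End of editorial sketch.]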
- -package jsonpb - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapJSONMarshalV2 = false - -// Marshaler is a configurable object for marshaling protocol buffer messages -// to the specified JSON representation. -type Marshaler struct { - // OrigName specifies whether to use the original protobuf name for fields. - OrigName bool - - // EnumsAsInts specifies whether to render enum values as integers, - // as opposed to string values. - EnumsAsInts bool - - // EmitDefaults specifies whether to render fields with zero values. - EmitDefaults bool - - // Indent controls whether the output is compact or not. - // If empty, the output is compact JSON. Otherwise, every JSON object - // entry and JSON array value will be on its own line. - // Each line will be preceded by repeated copies of Indent, where the - // number of copies is the current indentation depth. - Indent string - - // AnyResolver is used to resolve the google.protobuf.Any well-known type. - // If unset, the global registry is used by default. - AnyResolver AnyResolver -} - -// JSONPBMarshaler is implemented by protobuf messages that customize the -// way they are marshaled to JSON. Messages that implement this should also -// implement JSONPBUnmarshaler so that the custom format can be parsed. -// -// The JSON marshaling must follow the proto to JSON specification: -// -// https://developers.google.com/protocol-buffers/docs/proto3#json -// -// Deprecated: Custom types should implement protobuf reflection instead. -type JSONPBMarshaler interface { - MarshalJSONPB(*Marshaler) ([]byte, error) -} - -// Marshal serializes a protobuf message as JSON into w. -func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { - b, err := jm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// MarshalToString serializes a protobuf message as JSON in string form. -func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { - b, err := jm.marshal(m) - if err != nil { - return "", err - } - return string(b), nil -} - -func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { - v := reflect.ValueOf(m) - if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, errors.New("Marshal called with nil") - } - - // Check for custom marshalers first since they may not properly - // implement protobuf reflection that the logic below relies on. - if jsm, ok := m.(JSONPBMarshaler); ok { - return jsm.MarshalJSONPB(jm) - } - - if wrapJSONMarshalV2 { - opts := protojson.MarshalOptions{ - UseProtoNames: jm.OrigName, - UseEnumNumbers: jm.EnumsAsInts, - EmitUnpopulated: jm.EmitDefaults, - Indent: jm.Indent, - } - if jm.AnyResolver != nil { - opts.Resolver = anyResolver{jm.AnyResolver} - } - return opts.Marshal(proto.MessageReflect(m).Interface()) - } else { - // Check for unpopulated required fields first. - m2 := proto.MessageReflect(m) - if err := protoV2.CheckInitialized(m2.Interface()); err != nil { - return nil, err - } - - w := jsonWriter{Marshaler: jm} - err := w.marshalMessage(m2, "", "") - return w.buf, err - } -} - -type jsonWriter struct { - *Marshaler - buf []byte -} - -func (w *jsonWriter) write(s string) { - w.buf = append(w.buf, s...) 
-} - -func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { - if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { - b, err := jsm.MarshalJSONPB(w.Marshaler) - if err != nil { - return err - } - if typeURL != "" { - // we are marshaling this object to an Any type - var js map[string]*json.RawMessage - if err = json.Unmarshal(b, &js); err != nil { - return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) - } - turl, err := json.Marshal(typeURL) - if err != nil { - return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) - } - js["@type"] = (*json.RawMessage)(&turl) - if b, err = json.Marshal(js); err != nil { - return err - } - } - w.write(string(b)) - return nil - } - - md := m.Descriptor() - fds := md.Fields() - - // Handle well-known types. - const secondInNanos = int64(time.Second / time.Nanosecond) - switch wellKnownType(md.FullName()) { - case "Any": - return w.marshalAny(m, indent) - case "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue": - fd := fds.ByNumber(1) - return w.marshalValue(fd, m.Get(fd), indent) - case "Duration": - const maxSecondsInDuration = 315576000000 - // "Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision." - s := m.Get(fds.ByNumber(1)).Int() - ns := m.Get(fds.ByNumber(2)).Int() - if s < -maxSecondsInDuration || s > maxSecondsInDuration { - return fmt.Errorf("seconds out of range %v", s) - } - if ns <= -secondInNanos || ns >= secondInNanos { - return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) - } - if (s > 0 && ns < 0) || (s < 0 && ns > 0) { - return errors.New("signs of seconds and nanos do not match") - } - var sign string - if s < 0 || ns < 0 { - sign, s, ns = "-", -1*s, -1*ns - } - x := fmt.Sprintf("%s%d.%09d", sign, s, ns) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - w.write(fmt.Sprintf(`"%vs"`, x)) - return nil - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 0, 3, 6 or 9 fractional digits." - s := m.Get(fds.ByNumber(1)).Int() - ns := m.Get(fds.ByNumber(2)).Int() - if ns < 0 || ns >= secondInNanos { - return fmt.Errorf("ns out of range [0, %v)", secondInNanos) - } - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - w.write(fmt.Sprintf(`"%vZ"`, x)) - return nil - case "Value": - // JSON value; which is a null, number, string, bool, object, or array. - od := md.Oneofs().Get(0) - fd := m.WhichOneof(od) - if fd == nil { - return errors.New("nil Value") - } - return w.marshalValue(fd, m.Get(fd), indent) - case "Struct", "ListValue": - // JSON object or array. 
- fd := fds.ByNumber(1) - return w.marshalValue(fd, m.Get(fd), indent) - } - - w.write("{") - if w.Indent != "" { - w.write("\n") - } - - firstField := true - if typeURL != "" { - if err := w.marshalTypeURL(indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - if fd == nil { - continue - } - } else { - i++ - } - - v := m.Get(fd) - - if !m.Has(fd) { - if !w.EmitDefaults || fd.ContainingOneof() != nil { - continue - } - if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { - v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars - } - } - - if !firstField { - w.writeComma() - } - if err := w.marshalField(fd, v, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if md.ExtensionRanges().Len() > 0 { - // Collect a sorted list of all extension descriptor and values. - type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - if !firstField { - w.writeComma() - } - if err := w.marshalField(ext.desc, ext.val, indent); err != nil { - return err - } - firstField = false - } - } - - if w.Indent != "" { - w.write("\n") - w.write(indent) - } - w.write("}") - return nil -} - -func (w *jsonWriter) writeComma() { - if w.Indent != "" { - w.write(",\n") - } else { - w.write(",") - } -} - -func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." 
- md := m.Descriptor() - typeURL := m.Get(md.Fields().ByNumber(1)).String() - rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() - - var m2 protoreflect.Message - if w.AnyResolver != nil { - mi, err := w.AnyResolver.Resolve(typeURL) - if err != nil { - return err - } - m2 = proto.MessageReflect(mi) - } else { - mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) - if err != nil { - return err - } - m2 = mt.New() - } - - if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { - return err - } - - if wellKnownType(m2.Descriptor().FullName()) == "" { - return w.marshalMessage(m2, indent, typeURL) - } - - w.write("{") - if w.Indent != "" { - w.write("\n") - } - if err := w.marshalTypeURL(indent, typeURL); err != nil { - return err - } - w.writeComma() - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - w.write(`"value": `) - } else { - w.write(`"value":`) - } - if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { - return err - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - } - w.write("}") - return nil -} - -func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - } - w.write(`"@type":`) - if w.Indent != "" { - w.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - w.write(string(b)) - return nil -} - -// marshalField writes field description and value to the Writer. -func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - } - w.write(`"`) - switch { - case fd.IsExtension(): - // For message set, use the fname of the message as the extension name. - name := string(fd.FullName()) - if isMessageSet(fd.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - w.write("[" + name + "]") - case w.OrigName: - name := string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { - name = string(fd.Message().Name()) - } - w.write(name) - default: - w.write(string(fd.JSONName())) - } - w.write(`":`) - if w.Indent != "" { - w.write(" ") - } - return w.marshalValue(fd, v, indent) -} - -func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - switch { - case fd.IsList(): - w.write("[") - comma := "" - lv := v.List() - for i := 0; i < lv.Len(); i++ { - w.write(comma) - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - w.write(w.Indent) - } - if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { - return err - } - comma = "," - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - } - w.write("]") - return nil - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := v.Map() - - // Collect a sorted list of all map keys and values. 
- type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - - w.write(`{`) - comma := "" - for _, entry := range entries { - w.write(comma) - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - w.write(w.Indent) - } - - s := fmt.Sprint(entry.key.Interface()) - b, err := json.Marshal(s) - if err != nil { - return err - } - w.write(string(b)) - - w.write(`:`) - if w.Indent != "" { - w.write(` `) - } - - if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { - return err - } - comma = "," - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - } - w.write(`}`) - return nil - default: - return w.marshalSingularValue(fd, v, indent) - } -} - -func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - switch { - case !v.IsValid(): - w.write("null") - return nil - case fd.Message() != nil: - return w.marshalMessage(v.Message(), indent+w.Indent, "") - case fd.Enum() != nil: - if fd.Enum().FullName() == "google.protobuf.NullValue" { - w.write("null") - return nil - } - - vd := fd.Enum().Values().ByNumber(v.Enum()) - if vd == nil || w.EnumsAsInts { - w.write(strconv.Itoa(int(v.Enum()))) - } else { - w.write(`"` + string(vd.Name()) + `"`) - } - return nil - default: - switch v.Interface().(type) { - case float32, float64: - switch { - case math.IsInf(v.Float(), +1): - w.write(`"Infinity"`) - return nil - case math.IsInf(v.Float(), -1): - w.write(`"-Infinity"`) - return nil - case math.IsNaN(v.Float()): - w.write(`"NaN"`) - return nil - } - case int64, uint64: - w.write(fmt.Sprintf(`"%d"`, v.Interface())) - return nil - } - - b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - w.write(string(b)) - return nil - } -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go deleted file mode 100644 index 480e2448de661..0000000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/json.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jsonpb provides functionality to marshal and unmarshal between a -// protocol buffer message and JSON. It follows the specification at -// https://developers.google.com/protocol-buffers/docs/proto3#json. -// -// Do not rely on the default behavior of the standard encoding/json package -// when called on generated message types as it does not operate correctly. -// -// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" -// package instead. 
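[Editor's note: the package doc just above points migrating callers at protojson. A rough equivalence sketch — editorial, not part of the diff; the option mapping mirrors the wrapJSONMarshalV2 branch of marshal() in the deleted encode.go above (OrigName→UseProtoNames, EmitDefaults→EmitUnpopulated), and durationpb is assumed only as a convenient concrete message:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Equivalent of the legacy jsonpb.Marshaler{OrigName: true, EmitDefaults: true}.
	opts := protojson.MarshalOptions{
		UseProtoNames:   true, // jsonpb OrigName
		EmitUnpopulated: true, // jsonpb EmitDefaults
	}
	b, err := opts.Marshal(durationpb.New(90 * time.Second))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "90s"
}

End of editorial sketch.]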
-package jsonpb - -import ( - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// AnyResolver takes a type URL, present in an Any message, -// and resolves it into an instance of the associated message. -type AnyResolver interface { - Resolve(typeURL string) (proto.Message, error) -} - -type anyResolver struct{ AnyResolver } - -func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - return r.FindMessageByURL(string(message)) -} - -func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { - m, err := r.Resolve(url) - if err != nil { - return nil, err - } - return protoimpl.X.MessageTypeOf(m), nil -} - -func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - return protoregistry.GlobalTypes.FindExtensionByName(field) -} - -func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) -} - -func wellKnownType(s protoreflect.FullName) string { - if s.Parent() == "google.protobuf" { - switch s.Name() { - case "Empty", "Any", - "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue", - "Duration", "Timestamp", - "NullValue", "Struct", "Value", "ListValue": - return string(s.Name()) - } - } - return "" -} - -func isMessageSet(md protoreflect.MessageDescriptor) bool { - ms, ok := md.(interface{ IsMessageSet() bool }) - return ok && ms.IsMessageSet() -} diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d973389..0000000000000 --- a/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md index 6062a4dacd493..eab5dbf7ba732 100644 --- a/vendor/github.com/google/btree/README.md +++ b/vendor/github.com/google/btree/README.md @@ -1,7 +1,5 @@ # BTree implementation for Go -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - This package provides an in-memory B-Tree implementation for Go, useful as an ordered, mutable data structure. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index b83acdbc6d3ab..6f5184fef7d8e 100644 --- a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.18 +// +build !go1.18 + // Package btree implements in-memory B-Trees of arbitrary degree. // // btree implements an in-memory B-Tree for use as an ordered data structure. @@ -476,7 +479,7 @@ func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) child := n.mutableChild(i) // merge with right child mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1) + mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow) child.items = append(child.items, mergeItem) child.items = append(child.items, mergeChild.items...) child.children = append(child.children, mergeChild.children...) 
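[Editor's note: the btree.go hunk above routes the merged child through mutableFor(n.cow), so a merge during delete never mutates a node still shared with a clone. A short, illustrative sketch of the clone-then-delete scenario this change protects, using only the package's public API as shown in this diff — editorial, not part of the vendored source:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	t := btree.New(2)
	for i := 0; i < 100; i++ {
		t.ReplaceOrInsert(btree.Int(i))
	}

	// Clone shares every node copy-on-write. Deleting everything from the
	// clone forces child merges; without mutableFor on the merged child,
	// those merges could scribble on nodes the original tree still owns.
	c := t.Clone()
	for i := 0; i < 100; i++ {
		c.Delete(btree.Int(i))
	}
	fmt.Println(t.Len(), c.Len()) // 100 0
}

End of editorial sketch.]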
diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go new file mode 100644 index 0000000000000..e44a0f48804a7 --- /dev/null +++ b/vendor/github.com/google/btree/btree_generic.go @@ -0,0 +1,1083 @@ +// Copyright 2014-2022 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific +// instantiation of that generic for the Item interface, with a backwards- +// compatible API. Before go1.18, generics are not supported, +// and BTree is just an implementation based around the Item interface. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +// +// There are two implementations; those suffixed with 'G' are generics, usable +// for any type, and require a passed-in "less" function to define their ordering. +// Those without this suffix are specific to the 'Item' interface, and use +// its 'Less' function for ordering. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree.
+type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +// FreeListG represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList, in particular when they're created with Clone. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeListG[T any] struct { + mu sync.Mutex + freelist []*node[T] +} + +// NewFreeListG creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeListG[T any](size int) *FreeListG[T] { + return &FreeListG[T]{freelist: make([]*node[T], 0, size)} +} + +func (f *FreeListG[T]) newNode() (n *node[T]) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node[T]) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIteratorG[T any] func(item T) bool + +// Ordered represents the set of types for which the '<' operator works. +type Ordered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string +} + +// Less[T] returns a default LessFunc that uses the '<' operator for types that support it. +func Less[T Ordered]() LessFunc[T] { + return func(a, b T) bool { return a < b } +} + +// NewOrderedG creates a new B-Tree for ordered types. +func NewOrderedG[T Ordered](degree int) *BTreeG[T] { + return NewG[T](degree, Less[T]()) +} + +// NewG creates a new B-Tree with the given degree. +// +// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +// +// The passed-in LessFunc determines how objects of type T are ordered. +func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] { + return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize)) +} + +// NewWithFreeListG creates a new B-Tree that uses the given node free list. +func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] { + if degree <= 1 { + panic("bad degree") + } + return &BTreeG[T]{ + degree: degree, + cow: &copyOnWriteContext[T]{freelist: f, less: less}, + } +} + +// items stores items in a node. +type items[T any] []T + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items[T]) insertAt(index int, item T) { + var zero T + *s = append(*s, zero) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items[T]) removeAt(index int) T { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + var zero T + (*s)[len(*s)-1] = zero + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list.
+func (s *items[T]) pop() (out T) { + index := len(*s) - 1 + out = (*s)[index] + var zero T + (*s)[index] = zero + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items[T]) truncate(index int) { + var toClear items[T] + *s, toClear = (*s)[:index], (*s)[index:] + var zero T + for i := 0; i < len(toClear); i++ { + toClear[i] = zero + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return less(item, s[i]) + }) + if i > 0 && !less(s[i-1], item) { + return i - 1, true + } + return i, false +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node[T any] struct { + items items[T] + children items[*node[T]] + cow *copyOnWriteContext[T] +} + +func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items[T], len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(items[*node[T]], len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node[T]) mutableChild(i int) *node[T] { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node[T]) split(i int) (T, *node[T]) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node[T]) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item +// be found/replaced by insert, it will be returned.
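[Editor's note: to make the replace semantics just described concrete, a small illustrative sketch against the public generic API defined later in this file — editorial, not part of the vendored source:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// NewOrderedG orders ints with the built-in '<' via the Ordered constraint.
	t := btree.NewOrderedG[int](2)
	t.ReplaceOrInsert(7)
	old, found := t.ReplaceOrInsert(7) // equivalent item already present: replaced
	fmt.Println(old, found, t.Len())   // 7 true 1
}

End of editorial sketch.]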
+func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) { + i, found := n.items.find(item, n.cow.less) + if found { + out := n.items[i] + n.items[i] = item + return out, true + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case n.cow.less(item, inTree): + // no change, we want first split node + case n.cow.less(inTree, item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out, true + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node[T]) get(key T) (_ T, _ bool) { + i, found := n.items.find(key, n.cow.less) + if found { + return n.items[i], true + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return +} + +// min returns the first item in the subtree. +func min[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return + } + return n.items[0], true +} + +// max returns the last item in the subtree. +func max[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return + } + return n.items[len(n.items)-1], true +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop(), true + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0), true + } + i = 0 + case removeItem: + i, found = n.items.find(item, n.cow.less) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i), true + } + return + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + var zero T + n.items[i], _ = child.remove(zero, minItems, removeMax) + return out, true + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. 
+// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +type optionalItem[T any] struct { + item T + valid bool +} + +func optional[T any](item T) optionalItem[T] { + return optionalItem[T]{item: item, valid: true} +} +func empty[T any]() optionalItem[T] { + return optionalItem[T]{} +} + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating "greaterOrEqual" or "lessThanEqual" rather than just +// "greaterThan" or "lessThan" queries.
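[Editor's note: in terms of the public API, the includeStart flag described above is what separates the closed and half-open traversal helpers. An illustrative sketch — editorial, not part of the vendored source:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	t := btree.NewOrderedG[int](2)
	for _, v := range []int{1, 2, 3, 4, 5} {
		t.ReplaceOrInsert(v)
	}
	// AscendRange is [greaterOrEqual, lessThan): includeStart=true for the
	// lower bound, and iteration stops before the upper bound.
	t.AscendRange(2, 4, func(v int) bool {
		fmt.Println(v) // prints 2, then 3
		return true
	})
	// AscendLessThan is [first, pivot): includeStart=false.
	t.AscendLessThan(3, func(v int) bool {
		fmt.Println(v) // prints 1, then 2
		return true
	})
}

End of editorial sketch.]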
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start.valid { + index, _ = n.items.find(start.item, n.cow.less) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) { + hit = true + continue + } + hit = true + if stop.valid && !n.cow.less(n.items[i], stop.item) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start.valid { + index, found = n.items.find(start.item, n.cow.less) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start.valid && !n.cow.less(n.items[i], start.item) { + if !includeStart || hit || n.cow.less(start.item, n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop.valid && !n.cow.less(stop.item, n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// print is used for testing/debugging purposes. +func (n *node[T]) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTreeG is a generic implementation of a B-Tree. +// +// BTreeG stores items of type T in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTreeG[T any] struct { + degree int + length int + root *node[T] + cow *copyOnWriteContext[T] +} + +// LessFunc[T] determines how to order a type 'T'. It should implement a strict +// ordering, and should return true if within that ordering, 'a' < 'b'. +type LessFunc[T any] func(a, b T) bool + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext[T any] struct { + freelist *FreeListG[T] + less LessFunc[T] +} + +// Clone clones the btree, lazily. 
Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of t is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of t's original nodes would have been modified. Read operations +// should have no performance degradation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old t.cow) + // the new t.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTreeG[T]) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTreeG[T]) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext[T]) newNode() (n *node[T]) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). +func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned, +// and the second return value is true. Otherwise, (zeroValue, false) is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) { + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out, outb := t.root.insert(item, t.maxItems()) + if !outb { + t.length++ + } + return out, outb +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) Delete(item T) (T, bool) { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMin() (T, bool) { + var zero T + return t.deleteItem(zero, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMax() (T, bool) { + var zero T + return t.deleteItem(zero, removeMax) +} + +func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) { + if t.root == nil || len(t.root.items) == 0 { + return + } + t.root = t.root.mutableFor(t.cow) + out, outb := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if outb { + t.length-- + } + return out, outb +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns +// (zeroValue, false) if unable to find that item. 
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) { + if t.root == nil { + return + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Min() (_ T, _ bool) { + return min(t.root) +} + +// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Max() (_ T, _ bool) { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTreeG[T]) Has(key T) bool { + _, ok := t.Get(key) + return ok +} + +// Len returns the number of items currently in the tree. +func (t *BTreeG[T]) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTreeG[T]) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node[T]) reset(c *copyOnWriteContext[T]) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree BTreeG[Item] + +var itemLess LessFunc[Item] = func(a, b Item) bool { + return a.Less(b) +} + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return (*BTree)(NewG[Item](degree, itemLess)) +} + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList FreeListG[Item] + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. 
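[Editor's note: as the comments here and on FreeListG note, one FreeList may be shared between trees, and its mutex keeps concurrent writers to different trees safe. A tiny illustrative sketch — editorial, not part of the vendored source:

package main

import "github.com/google/btree"

func main() {
	// Two trees sharing one freelist; nodes freed by either tree can be
	// reused by the other instead of going to the garbage collector.
	fl := btree.NewFreeList(btree.DefaultFreeListSize)
	t1 := btree.NewWithFreeList(4, fl)
	t2 := btree.NewWithFreeList(4, fl)
	t1.ReplaceOrInsert(btree.Int(1))
	t2.ReplaceOrInsert(btree.Int(2))
}

End of editorial sketch.]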
+func NewFreeList(size int) *FreeList { + return (*FreeList)(NewFreeListG[Item](size)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f))) +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator ItemIteratorG[Item] + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of t is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of t's original nodes would have been modified. Read operations +// should have no performance degradation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + return (*BTree)((*BTreeG[Item])(t).Clone()) +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + i, _ := (*BTreeG[Item])(t).Delete(item) + return i +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + i, _ := (*BTreeG[Item])(t).DeleteMax() + return i +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + i, _ := (*BTreeG[Item])(t).DeleteMin() + return i +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + i, _ := (*BTreeG[Item])(t).Get(key) + return i +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + i, _ := (*BTreeG[Item])(t).Max() + return i +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + i, _ := (*BTreeG[Item])(t).Min() + return i +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return (*BTreeG[Item])(t).Has(key) +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item) + return i +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator)) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator)) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator)) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator)) +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return (*BTreeG[Item])(t).Len() +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. 
+func (t *BTree) Clear(addNodesToFreelist bool) { + (*BTreeG[Item])(t).Clear(addNodesToFreelist) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore deleted file mode 100644 index 826caa3902bb8..0000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore +++ /dev/null @@ -1,204 +0,0 @@ -# Created by .ignore support plugin (hsz.mobi) -### Go template -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -### Windows template -# Windows image file caches -Thumbs.db -ehthumbs.db - -# Folder config file -Desktop.ini - -# Recycle Bin used on file shares -$RECYCLE.BIN/ - -# Windows Installer files -*.cab -*.msi -*.msm -*.msp - -# Windows shortcuts -*.lnk -### Kate template -# Swap Files # -.*.kate-swp -.swp.* -### SublimeText template -# cache files for sublime text -*.tmlanguage.cache -*.tmPreferences.cache -*.stTheme.cache - -# workspace files are user-specific -*.sublime-workspace - -# project files should be checked into the repository, unless a significant -# proportion of contributors will probably not be using SublimeText -# *.sublime-project - -# sftp configuration file -sftp-config.json -### Linux template -*~ - -# temporary files which can be created if a process still has a handle open of a deleted file -.fuse_hidden* - -# KDE directory preferences -.directory - -# Linux trash folder which might appear on any partition or disk -.Trash-* -### JetBrains template -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm -# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 - -# User-specific stuff: -.idea -.idea/tasks.xml -.idea/dictionaries -.idea/vcs.xml -.idea/jsLibraryMappings.xml - -# Sensitive or high-churn files: -.idea/dataSources.ids -.idea/dataSources.xml -.idea/dataSources.local.xml -.idea/sqlDataSources.xml -.idea/dynamic.xml -.idea/uiDesigner.xml - -# Gradle: -.idea/gradle.xml -.idea/libraries - -# Mongo Explorer plugin: -.idea/mongoSettings.xml - -## File-based project format: -*.iws - -## Plugin-specific files: - -# IntelliJ -/out/ - -# mpeltonen/sbt-idea plugin -.idea_modules/ - -# JIRA plugin -atlassian-ide-plugin.xml - -# Crashlytics plugin (for Android Studio and IntelliJ) -com_crashlytics_export_strings.xml -crashlytics.properties -crashlytics-build.properties -fabric.properties -### Xcode template -# Xcode -# -# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore - -## Build generated -build/ -DerivedData/ - -## Various settings -*.pbxuser -!default.pbxuser -*.mode1v3 -!default.mode1v3 -*.mode2v3 -!default.mode2v3 -*.perspectivev3 -!default.perspectivev3 -xcuserdata/ - -## Other -*.moved-aside -*.xccheckout -*.xcscmblueprint -### Eclipse template - -.metadata -bin/ -tmp/ -*.tmp -*.bak -*.swp -*~.nib -local.properties -.settings/ -.loadpath -.recommenders - -# Eclipse Core -.project - -# External tool builders -.externalToolBuilders/ - -# Locally stored "Eclipse launch configurations" -*.launch - -# PyDev specific (Python IDE for Eclipse) -*.pydevproject - -# CDT-specific (C/C++ Development Tooling) -.cproject - -# JDT-specific (Eclipse Java Development Tools) -.classpath - -# Java annotation 
processor (APT)
-.factorypath
-
-# PDT-specific (PHP Development Tools)
-.buildpath
-
-# sbteclipse plugin
-.target
-
-# Tern plugin
-.tern-project
-
-# TeXlipse plugin
-.texlipse
-
-# STS (Spring Tool Suite)
-.springBeans
-
-# Code Recommenders
-.recommenders/
-
-
-coverage.txt
-
-#vendor
-vendor/
-
-.envrc
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
deleted file mode 100644
index fc198d8827977..0000000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-sudo: false
-language: go
-go:
-  - 1.13.x
-  - 1.14.x
-  - 1.15.x
-
-env:
-  global:
-    - GO111MODULE=on
-
-script:
-  - make test
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
deleted file mode 100644
index 6eeb7e2dc3f28..0000000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
-and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
-
-Types of changes:
-- `Added` for new features.
-- `Changed` for changes in existing functionality.
-- `Deprecated` for soon-to-be removed features.
-- `Removed` for now removed features.
-- `Fixed` for any bug fixes.
-- `Security` in case of vulnerabilities.
-
-## [Unreleased]
-
-### Added
-
-- [#223](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/223) Add go-kit logging middleware - [adrien-f](https://github.com/adrien-f)
-
-## [v1.1.0] - 2019-09-12
-### Added
-- [#226](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/226) Support for go modules.
-- [#221](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/221) logging/zap add support for gRPC LoggerV2 - [kush-patel-hs](https://github.com/kush-patel-hs)
-- [#181](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/181) Rate Limit support - [ceshihao](https://github.com/ceshihao)
-- [#161](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/161) Retry on server stream call - [lonnblad](https://github.com/lonnblad)
-- [#152](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/152) Exponential backoff functions - [polyfloyd](https://github.com/polyfloyd)
-- [#147](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/147) Jaeger support for ctxtags extraction - [vporoshok](https://github.com/vporoshok)
-- [#184](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/184) ctxTags identifies if the call was sampled
-
-### Deprecated
-- [#201](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/201) `golang.org/x/net/context` - [houz42](https://github.com/houz42)
-- [#183](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/183) Documentation Generation in favour of .
-
-### Fixed
-- [#172](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/172) Passing ctx into retry and recover - [johanbrandhorst](https://github.com/johanbrandhorst)
-- Numerous documentation fixes.
-
-## v1.0.0 - 2018-05-08
-### Added
-- grpc_auth
-- grpc_ctxtags
-- grpc_zap
-- grpc_logrus
-- grpc_opentracing
-- grpc_retry
-- grpc_validator
-- grpc_recovery
-
-[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.1.0...HEAD
-[v1.1.0]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...v1.1.0
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md
deleted file mode 100644
index dd52ab8938e05..0000000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Contributing
-
-We would love to have people submit pull requests and help make `grpc-ecosystem/go-grpc-middleware` even better 👍.
-
-Fork, then clone the repo:
-
-```bash
-git clone git@github.com:your-username/go-grpc-middleware.git
-```
-
-Before checking in, please run the following:
-
-```bash
-make all
-```
-
-This will `vet`, `fmt`, regenerate documentation and run all tests.
-
-
-Push to your fork and open a pull request.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
deleted file mode 100644
index 814e155176bd9..0000000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# Go gRPC Middleware
-
-[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware.svg?branch=master)](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware)
-[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-middleware)](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware)
-[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware)
-[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/?badge)
-[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
-[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
-[![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status)
-[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE)
-
-[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
-
-## Middleware
-
-[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
-Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
-that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client around the user call. It is a perfect way to implement
-common patterns: auth, logging, message validation, retries or monitoring.
-
-These are generic building blocks that make it easy to build multiple microservices.
-The purpose of this repository is to act as a go-to point for such reusable functionality. It contains
-some of them itself, but also links to useful external repos.
-
-`grpc_middleware` itself provides support for chaining interceptors, here's an example:
-
-```go
-import "github.com/grpc-ecosystem/go-grpc-middleware"
-
-myServer := grpc.NewServer(
-    grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
-        grpc_recovery.StreamServerInterceptor(),
-        grpc_ctxtags.StreamServerInterceptor(),
-        grpc_opentracing.StreamServerInterceptor(),
-        grpc_prometheus.StreamServerInterceptor,
-        grpc_zap.StreamServerInterceptor(zapLogger),
-        grpc_auth.StreamServerInterceptor(myAuthFunction),
-    )),
-    grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
-        grpc_recovery.UnaryServerInterceptor(),
-        grpc_ctxtags.UnaryServerInterceptor(),
-        grpc_opentracing.UnaryServerInterceptor(),
-        grpc_prometheus.UnaryServerInterceptor,
-        grpc_zap.UnaryServerInterceptor(zapLogger),
-        grpc_auth.UnaryServerInterceptor(myAuthFunction),
-    )),
-)
-```
-
-## Interceptors
-
-*Please send a PR to add new interceptors or middleware to this list*
-
-#### Auth
- * [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware
-
-#### Logging
- * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
- * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
- * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
- * [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers.
- * [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows replacing loggers at runtime (thread-safe).
-
-#### Monitoring
- * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
- * [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
- * [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
-
-#### Client
- * [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
-
-#### Server
- * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
- * [`grpc_recovery`](recovery/) - turn panics into gRPC errors
- * [`ratelimit`](ratelimit/) - gRPC rate limiting by your own limiter
-
-
-## Status
-
-This code has been running in *production* since May 2016 as the basis of the gRPC microservices stack at [Improbable](https://improbable.io).
-
-Additional tooling will be added, and contributions are welcome.
-
-## License
-
-`go-grpc-middleware` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
deleted file mode 100644
index ea3738b896cfd..0000000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2016 Michal Witkowski. All Rights Reserved.
-// See LICENSE for licensing terms.
-
-// gRPC Server Interceptor chaining middleware.
-
-package grpc_middleware
-
-import (
-	"context"
-
-	"google.golang.org/grpc"
-)
-
-// ChainUnaryServer creates a single interceptor out of a chain of many interceptors.
-//
-// Execution is done in left-to-right order, including passing of context.
-// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
-// will see context changes of one and two.
-func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
-	n := len(interceptors)
-
-	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-		chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler {
-			return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
-				return currentInter(currentCtx, currentReq, info, currentHandler)
-			}
-		}
-
-		chainedHandler := handler
-		for i := n - 1; i >= 0; i-- {
-			chainedHandler = chainer(interceptors[i], chainedHandler)
-		}
-
-		return chainedHandler(ctx, req)
-	}
-}
-
-// ChainStreamServer creates a single interceptor out of a chain of many interceptors.
-//
-// Execution is done in left-to-right order, including passing of context.
-// For example ChainStreamServer(one, two, three) will execute one before two before three.
-// If you want to pass context between interceptors, use WrapServerStream.
-func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
-	n := len(interceptors)
-
-	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-		chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler {
-			return func(currentSrv interface{}, currentStream grpc.ServerStream) error {
-				return currentInter(currentSrv, currentStream, info, currentHandler)
-			}
-		}
-
-		chainedHandler := handler
-		for i := n - 1; i >= 0; i-- {
-			chainedHandler = chainer(interceptors[i], chainedHandler)
-		}
-
-		return chainedHandler(srv, ss)
-	}
-}
-
-// ChainUnaryClient creates a single interceptor out of a chain of many interceptors.
-//
-// Execution is done in left-to-right order, including passing of context.
-// For example ChainUnaryClient(one, two, three) will execute one before two before three.
-func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
-	n := len(interceptors)
-
-	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-		chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker {
-			return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
-				return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...)
-			}
-		}
-
-		chainedInvoker := invoker
-		for i := n - 1; i >= 0; i-- {
-			chainedInvoker = chainer(interceptors[i], chainedInvoker)
-		}
-
-		return chainedInvoker(ctx, method, req, reply, cc, opts...)
-	}
-}
-
-// ChainStreamClient creates a single interceptor out of a chain of many interceptors.
-//
-// Execution is done in left-to-right order, including passing of context.
-// For example ChainStreamClient(one, two, three) will execute one before two before three.
-func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
-	n := len(interceptors)
-
-	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
-		chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer {
-			return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
-				return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...)
-			}
-		}
-
-		chainedStreamer := streamer
-		for i := n - 1; i >= 0; i-- {
-			chainedStreamer = chainer(interceptors[i], chainedStreamer)
-		}
-
-		return chainedStreamer(ctx, desc, cc, method, opts...)
-	}
-}
-
-// Chain creates a single interceptor out of a chain of many interceptors.
-//
-// WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors.
-// Basically syntactic sugar.
-func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption {
-	return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...))
-}
-
-// WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors.
-// Basically syntactic sugar.
-func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption {
-	return grpc.StreamInterceptor(ChainStreamServer(interceptors...))
-}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
deleted file mode 100644
index 718e10046a224..0000000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2016 Michal Witkowski. All Rights Reserved.
-// See LICENSE for licensing terms.
-
-/*
-`grpc_middleware` is a collection of gRPC middleware packages: interceptors, helpers and tools.
-
-Middleware
-
-gRPC is a fantastic RPC middleware, which sees a lot of adoption in the Golang world. However, the
-upstream gRPC codebase is relatively bare bones.
-
-This package, and most of its child packages, provides commonly needed middleware for gRPC:
-client-side interceptors for retries, server-side interceptors for input validation and auth,
-functions for chaining said interceptors, metadata convenience methods and more.
-
-Chaining
-
-By default, gRPC doesn't allow one to have more than one interceptor either on the client or on
-the server side. `grpc_middleware` provides convenient chaining methods.
-
-These are a simple way of turning multiple interceptors into a single interceptor. Here's an
-example for server chaining:
-
-	myServer := grpc.NewServer(
-		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)),
-		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary)),
-	)
-
-These interceptors will be executed from left to right: logging, monitoring and auth.
-
-Here's an example for client side chaining:
-
-	clientConn, err = grpc.Dial(
-		address,
-		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(monitoringClientUnary, retryUnary)),
-		grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(monitoringClientStream, retryStream)),
-	)
-	client = pb_testproto.NewTestServiceClient(clientConn)
-	resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
-
-These interceptors will be executed from left to right: monitoring and then retry logic.
-
-The retry interceptor will call every interceptor that follows it whenever a retry happens.
-
-Writing Your Own
-
-Implementing your own interceptor is pretty trivial: there are interfaces for that. But the interesting
-bit is exposing common data to handlers (and other middleware), similarly to HTTP Middleware design.
-For example, you may want to pass the identity of the caller from the auth interceptor all the way
-to the handling function.
-
-A server-side unary interceptor for auth looks like:
-
-	func FakeAuthUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-		newCtx := context.WithValue(ctx, "user_id", "john@example.com")
-		return handler(newCtx, req)
-	}
-
-Unfortunately, it's not as easy for streaming RPCs. These have the `context.Context` embedded within
-the `grpc.ServerStream` object. To pass values through context, a wrapper (`WrappedServerStream`) is
-needed. For example:
-
-	func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-		newStream := grpc_middleware.WrapServerStream(stream)
-		newStream.WrappedContext = context.WithValue(stream.Context(), "user_id", "john@example.com")
-		return handler(srv, newStream)
-	}
-*/
-package grpc_middleware
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
deleted file mode 100644
index b18d2d2bb1199..0000000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-SHELL=/bin/bash
-
-GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/)
-
-all: vet fmt test
-
-fmt:
-	go fmt $(GOFILES_NOVENDOR)
-
-vet:
-	# do not check lostcancel, they are intentional.
-	go vet -lostcancel=false $(GOFILES_NOVENDOR)
-
-test: vet
-	./scripts/test_all.sh
-
-.PHONY: all test
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png
deleted file mode 100644
index cc8f9a68a9368..0000000000000
Binary files a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png and /dev/null differ
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
deleted file mode 100644
index 05ccfb3f24a79..0000000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2016 Michal Witkowski. All Rights Reserved.
-// See LICENSE for licensing terms.
-
-package grpc_middleware
-
-import (
-	"context"
-
-	"google.golang.org/grpc"
-)
-
-// WrappedServerStream is a thin wrapper around grpc.ServerStream that allows modifying context.
-type WrappedServerStream struct {
-	grpc.ServerStream
-	// WrappedContext is the wrapper's own Context. You can assign it.
- WrappedContext context.Context -} - -// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context() -func (w *WrappedServerStream) Context() context.Context { - return w.WrappedContext -} - -// WrapServerStream returns a ServerStream that has the ability to overwrite context. -func WrapServerStream(stream grpc.ServerStream) *WrappedServerStream { - if existing, ok := stream.(*WrappedServerStream); ok { - return existing - } - return &WrappedServerStream{ServerStream: stream, WrappedContext: stream.Context()} -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt deleted file mode 100644 index 364516251b93b..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015, Gengo, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of Gengo, Inc. nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
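For reference, the WrappedServerStream helper removed above is the piece streaming middleware uses to smuggle values into a stream's context, since grpc.ServerStream itself offers no way to replace it. Below is a minimal sketch of an interceptor built on it, assuming only the API shown in the deleted wrappers.go; the package, interceptor, and key names are illustrative:

```go
package example

import (
	"context"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

// ctxKey is an illustrative unexported key type that avoids context key collisions.
type ctxKey string

// authStreamInterceptor is a hypothetical stream server interceptor that
// attaches a caller identity to the stream's context for downstream handlers.
func authStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	// WrapServerStream returns the existing wrapper if ss is already wrapped,
	// so chained interceptors do not stack wrappers.
	wrapped := grpc_middleware.WrapServerStream(ss)
	wrapped.WrappedContext = context.WithValue(ss.Context(), ctxKey("user_id"), "john@example.com")
	return handler(srv, wrapped)
}
```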
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel deleted file mode 100644 index 5242751fb2d5a..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -package(default_visibility = ["//visibility:public"]) - -proto_library( - name = "internal_proto", - srcs = ["errors.proto"], - deps = ["@com_google_protobuf//:any_proto"], -) - -go_proto_library( - name = "internal_go_proto", - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", - proto = ":internal_proto", -) - -go_library( - name = "go_default_library", - embed = [":internal_go_proto"], - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go deleted file mode 100644 index 61101d7177f05..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go +++ /dev/null @@ -1,189 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: internal/errors.proto - -package internal - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Error is the generic error returned from unary RPCs. -type Error struct { - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - // This is to make the error more compatible with users that expect errors to be Status objects: - // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto - // It should be the exact same message as the Error field. 
- Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} -func (*Error) Descriptor() ([]byte, []int) { - return fileDescriptor_9b093362ca6d1e03, []int{0} -} - -func (m *Error) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Error.Unmarshal(m, b) -} -func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Error.Marshal(b, m, deterministic) -} -func (m *Error) XXX_Merge(src proto.Message) { - xxx_messageInfo_Error.Merge(m, src) -} -func (m *Error) XXX_Size() int { - return xxx_messageInfo_Error.Size(m) -} -func (m *Error) XXX_DiscardUnknown() { - xxx_messageInfo_Error.DiscardUnknown(m) -} - -var xxx_messageInfo_Error proto.InternalMessageInfo - -func (m *Error) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -func (m *Error) GetCode() int32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *Error) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *Error) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -// StreamError is a response type which is returned when -// streaming rpc returns an error. -type StreamError struct { - GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` - HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` - Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StreamError) Reset() { *m = StreamError{} } -func (m *StreamError) String() string { return proto.CompactTextString(m) } -func (*StreamError) ProtoMessage() {} -func (*StreamError) Descriptor() ([]byte, []int) { - return fileDescriptor_9b093362ca6d1e03, []int{1} -} - -func (m *StreamError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamError.Unmarshal(m, b) -} -func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) -} -func (m *StreamError) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamError.Merge(m, src) -} -func (m *StreamError) XXX_Size() int { - return xxx_messageInfo_StreamError.Size(m) -} -func (m *StreamError) XXX_DiscardUnknown() { - xxx_messageInfo_StreamError.DiscardUnknown(m) -} - -var xxx_messageInfo_StreamError proto.InternalMessageInfo - -func (m *StreamError) GetGrpcCode() int32 { - if m != nil { - return m.GrpcCode - } - return 0 -} - -func (m *StreamError) GetHttpCode() int32 { - if m != nil { - return m.HttpCode - } - return 0 -} - -func (m *StreamError) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *StreamError) GetHttpStatus() string { - if m != nil { - return 
m.HttpStatus - } - return "" -} - -func (m *StreamError) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -func init() { - proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error") - proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") -} - -func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) } - -var fileDescriptor_9b093362ca6d1e03 = []byte{ - // 252 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30, - 0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52, - 0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24, - 0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c, - 0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2, - 0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1, - 0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b, - 0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c, - 0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b, - 0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72, - 0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3, - 0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6, - 0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7, - 0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae, - 0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03, - 0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto deleted file mode 100644 index 4fb212c6b690d..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; -package grpc.gateway.runtime; -option go_package = "internal"; - -import "google/protobuf/any.proto"; - -// Error is the generic error returned from unary RPCs. -message Error { - string error = 1; - // This is to make the error more compatible with users that expect errors to be Status objects: - // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto - // It should be the exact same message as the Error field. - int32 code = 2; - string message = 3; - repeated google.protobuf.Any details = 4; -} - -// StreamError is a response type which is returned when -// streaming rpc returns an error. 
-message StreamError { - int32 grpc_code = 1; - int32 http_code = 2; - string message = 3; - string http_status = 4; - repeated google.protobuf.Any details = 5; -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel deleted file mode 100644 index 58b72b9cf7513..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel +++ /dev/null @@ -1,85 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "convert.go", - "doc.go", - "errors.go", - "fieldmask.go", - "handler.go", - "marshal_httpbodyproto.go", - "marshal_json.go", - "marshal_jsonpb.go", - "marshal_proto.go", - "marshaler.go", - "marshaler_registry.go", - "mux.go", - "pattern.go", - "proto2_convert.go", - "proto_errors.go", - "query.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", - deps = [ - "//internal:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//descriptor:go_default_library_gen", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@go_googleapis//google/api:httpbody_go_proto", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//grpclog:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "context_test.go", - "convert_test.go", - "errors_test.go", - "fieldmask_test.go", - "handler_test.go", - "marshal_httpbodyproto_test.go", - "marshal_json_test.go", - "marshal_jsonpb_test.go", - "marshal_proto_test.go", - "marshaler_registry_test.go", - "mux_test.go", - "pattern_test.go", - "query_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//internal:go_default_library", - "//runtime/internal/examplepb:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@go_googleapis//google/api:httpbody_go_proto", - "@go_googleapis//google/rpc:errdetails_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:empty_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go deleted file mode 100644 index d8cbd4cc96b03..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ /dev/null @@ -1,291 
+0,0 @@ -package runtime - -import ( - "context" - "encoding/base64" - "fmt" - "net" - "net/http" - "net/textproto" - "strconv" - "strings" - "sync" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// MetadataHeaderPrefix is the http prefix that represents custom metadata -// parameters to or from a gRPC call. -const MetadataHeaderPrefix = "Grpc-Metadata-" - -// MetadataPrefix is prepended to permanent HTTP header keys (as specified -// by the IANA) when added to the gRPC context. -const MetadataPrefix = "grpcgateway-" - -// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to -// HTTP headers in a response handled by grpc-gateway -const MetadataTrailerPrefix = "Grpc-Trailer-" - -const metadataGrpcTimeout = "Grpc-Timeout" -const metadataHeaderBinarySuffix = "-Bin" - -const xForwardedFor = "X-Forwarded-For" -const xForwardedHost = "X-Forwarded-Host" - -var ( - // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound - // header isn't present. If the value is 0 the sent `context` will not have a timeout. - DefaultContextTimeout = 0 * time.Second -) - -func decodeBinHeader(v string) ([]byte, error) { - if len(v)%4 == 0 { - // Input was padded, or padding was not necessary. - return base64.StdEncoding.DecodeString(v) - } - return base64.RawStdEncoding.DecodeString(v) -} - -/* -AnnotateContext adds context information such as metadata from the request. - -At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", -except that the forwarded destination is not another HTTP service but rather -a gRPC service. -*/ -func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { - ctx, md, err := annotateContext(ctx, mux, req) - if err != nil { - return nil, err - } - if md == nil { - return ctx, nil - } - - return metadata.NewOutgoingContext(ctx, md), nil -} - -// AnnotateIncomingContext adds context information such as metadata from the request. -// Attach metadata as incoming context. -func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { - ctx, md, err := annotateContext(ctx, mux, req) - if err != nil { - return nil, err - } - if md == nil { - return ctx, nil - } - - return metadata.NewIncomingContext(ctx, md), nil -} - -func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) { - var pairs []string - timeout := DefaultContextTimeout - if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { - var err error - timeout, err = timeoutDecode(tm) - if err != nil { - return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) - } - } - - for key, vals := range req.Header { - key = textproto.CanonicalMIMEHeaderKey(key) - for _, val := range vals { - // For backwards-compatibility, pass through 'authorization' header with no prefix. - if key == "Authorization" { - pairs = append(pairs, "authorization", val) - } - if h, ok := mux.incomingHeaderMatcher(key); ok { - // Handles "-bin" metadata in grpc, since grpc will do another base64 - // encode before sending to server, we need to decode it first. 
- if strings.HasSuffix(key, metadataHeaderBinarySuffix) { - b, err := decodeBinHeader(val) - if err != nil { - return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) - } - - val = string(b) - } - pairs = append(pairs, h, val) - } - } - } - if host := req.Header.Get(xForwardedHost); host != "" { - pairs = append(pairs, strings.ToLower(xForwardedHost), host) - } else if req.Host != "" { - pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) - } - - if addr := req.RemoteAddr; addr != "" { - if remoteIP, _, err := net.SplitHostPort(addr); err == nil { - if fwd := req.Header.Get(xForwardedFor); fwd == "" { - pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) - } else { - pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) - } - } - } - - if timeout != 0 { - ctx, _ = context.WithTimeout(ctx, timeout) - } - if len(pairs) == 0 { - return ctx, nil, nil - } - md := metadata.Pairs(pairs...) - for _, mda := range mux.metadataAnnotators { - md = metadata.Join(md, mda(ctx, req)) - } - return ctx, md, nil -} - -// ServerMetadata consists of metadata sent from gRPC server. -type ServerMetadata struct { - HeaderMD metadata.MD - TrailerMD metadata.MD -} - -type serverMetadataKey struct{} - -// NewServerMetadataContext creates a new context with ServerMetadata -func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { - return context.WithValue(ctx, serverMetadataKey{}, md) -} - -// ServerMetadataFromContext returns the ServerMetadata in ctx -func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { - md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) - return -} - -// ServerTransportStream implements grpc.ServerTransportStream. -// It should only be used by the generated files to support grpc.SendHeader -// outside of gRPC server use. -type ServerTransportStream struct { - mu sync.Mutex - header metadata.MD - trailer metadata.MD -} - -// Method returns the method for the stream. -func (s *ServerTransportStream) Method() string { - return "" -} - -// Header returns the header metadata of the stream. -func (s *ServerTransportStream) Header() metadata.MD { - s.mu.Lock() - defer s.mu.Unlock() - return s.header.Copy() -} - -// SetHeader sets the header metadata. -func (s *ServerTransportStream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - - s.mu.Lock() - s.header = metadata.Join(s.header, md) - s.mu.Unlock() - return nil -} - -// SendHeader sets the header metadata. -func (s *ServerTransportStream) SendHeader(md metadata.MD) error { - return s.SetHeader(md) -} - -// Trailer returns the cached trailer metadata. -func (s *ServerTransportStream) Trailer() metadata.MD { - s.mu.Lock() - defer s.mu.Unlock() - return s.trailer.Copy() -} - -// SetTrailer sets the trailer metadata. 
-func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
-	if md.Len() == 0 {
-		return nil
-	}
-
-	s.mu.Lock()
-	s.trailer = metadata.Join(s.trailer, md)
-	s.mu.Unlock()
-	return nil
-}
-
-func timeoutDecode(s string) (time.Duration, error) {
-	size := len(s)
-	if size < 2 {
-		return 0, fmt.Errorf("timeout string is too short: %q", s)
-	}
-	d, ok := timeoutUnitToDuration(s[size-1])
-	if !ok {
-		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
-	}
-	t, err := strconv.ParseInt(s[:size-1], 10, 64)
-	if err != nil {
-		return 0, err
-	}
-	return d * time.Duration(t), nil
-}
-
-func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
-	switch u {
-	case 'H':
-		return time.Hour, true
-	case 'M':
-		return time.Minute, true
-	case 'S':
-		return time.Second, true
-	case 'm':
-		return time.Millisecond, true
-	case 'u':
-		return time.Microsecond, true
-	case 'n':
-		return time.Nanosecond, true
-	default:
-	}
-	return
-}
-
-// isPermanentHTTPHeader checks whether hdr belongs to the list of
-// permanent request headers maintained by IANA.
-// http://www.iana.org/assignments/message-headers/message-headers.xml
-func isPermanentHTTPHeader(hdr string) bool {
-	switch hdr {
-	case
-		"Accept",
-		"Accept-Charset",
-		"Accept-Language",
-		"Accept-Ranges",
-		"Authorization",
-		"Cache-Control",
-		"Content-Type",
-		"Cookie",
-		"Date",
-		"Expect",
-		"From",
-		"Host",
-		"If-Match",
-		"If-Modified-Since",
-		"If-None-Match",
-		"If-Schedule-Tag-Match",
-		"If-Unmodified-Since",
-		"Max-Forwards",
-		"Origin",
-		"Pragma",
-		"Referer",
-		"User-Agent",
-		"Via",
-		"Warning":
-		return true
-	}
-	return false
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
deleted file mode 100644
index 2c279344dc414..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
+++ /dev/null
@@ -1,318 +0,0 @@
-package runtime
-
-import (
-	"encoding/base64"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/golang/protobuf/jsonpb"
-	"github.com/golang/protobuf/ptypes/duration"
-	"github.com/golang/protobuf/ptypes/timestamp"
-	"github.com/golang/protobuf/ptypes/wrappers"
-)
-
-// String just returns the given string.
-// It is just for compatibility with other types.
-func String(val string) (string, error) {
-	return val, nil
-}
-
-// StringSlice converts 'val' where individual strings are separated by
-// 'sep' into a string slice.
-func StringSlice(val, sep string) ([]string, error) {
-	return strings.Split(val, sep), nil
-}
-
-// Bool converts the given string representation of a boolean value into bool.
-func Bool(val string) (bool, error) {
-	return strconv.ParseBool(val)
-}
-
-// BoolSlice converts 'val' where individual booleans are separated by
-// 'sep' into a bool slice.
-func BoolSlice(val, sep string) ([]bool, error) {
-	s := strings.Split(val, sep)
-	values := make([]bool, len(s))
-	for i, v := range s {
-		value, err := Bool(v)
-		if err != nil {
-			return values, err
-		}
-		values[i] = value
-	}
-	return values, nil
-}
-
-// Float64 converts the given string representation of a floating point number into float64.
-func Float64(val string) (float64, error) {
-	return strconv.ParseFloat(val, 64)
-}
-
-// Float64Slice converts 'val' where individual floating point numbers are separated by
-// 'sep' into a float64 slice.
-func Float64Slice(val, sep string) ([]float64, error) { - s := strings.Split(val, sep) - values := make([]float64, len(s)) - for i, v := range s { - value, err := Float64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Float32 converts the given string representation of a floating point number into float32. -func Float32(val string) (float32, error) { - f, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -// Float32Slice converts 'val' where individual floating point numbers are separated by -// 'sep' into a float32 slice. -func Float32Slice(val, sep string) ([]float32, error) { - s := strings.Split(val, sep) - values := make([]float32, len(s)) - for i, v := range s { - value, err := Float32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Int64 converts the given string representation of an integer into int64. -func Int64(val string) (int64, error) { - return strconv.ParseInt(val, 0, 64) -} - -// Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. -func Int64Slice(val, sep string) ([]int64, error) { - s := strings.Split(val, sep) - values := make([]int64, len(s)) - for i, v := range s { - value, err := Int64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Int32 converts the given string representation of an integer into int32. -func Int32(val string) (int32, error) { - i, err := strconv.ParseInt(val, 0, 32) - if err != nil { - return 0, err - } - return int32(i), nil -} - -// Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. -func Int32Slice(val, sep string) ([]int32, error) { - s := strings.Split(val, sep) - values := make([]int32, len(s)) - for i, v := range s { - value, err := Int32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Uint64 converts the given string representation of an integer into uint64. -func Uint64(val string) (uint64, error) { - return strconv.ParseUint(val, 0, 64) -} - -// Uint64Slice converts 'val' where individual integers are separated by -// 'sep' into a uint64 slice. -func Uint64Slice(val, sep string) ([]uint64, error) { - s := strings.Split(val, sep) - values := make([]uint64, len(s)) - for i, v := range s { - value, err := Uint64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Uint32 converts the given string representation of an integer into uint32. -func Uint32(val string) (uint32, error) { - i, err := strconv.ParseUint(val, 0, 32) - if err != nil { - return 0, err - } - return uint32(i), nil -} - -// Uint32Slice converts 'val' where individual integers are separated by -// 'sep' into a uint32 slice. 
-func Uint32Slice(val, sep string) ([]uint32, error) {
-	s := strings.Split(val, sep)
-	values := make([]uint32, len(s))
-	for i, v := range s {
-		value, err := Uint32(v)
-		if err != nil {
-			return values, err
-		}
-		values[i] = value
-	}
-	return values, nil
-}
-
-// Bytes converts the given string representation of a byte sequence into a slice of bytes.
-// A byte sequence is encoded in URL-safe base64 without padding.
-func Bytes(val string) ([]byte, error) {
-	b, err := base64.StdEncoding.DecodeString(val)
-	if err != nil {
-		b, err = base64.URLEncoding.DecodeString(val)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return b, nil
-}
-
-// BytesSlice converts 'val' where individual byte sequences, encoded in URL-safe
-// base64 without padding, are separated by 'sep' into a slice of byte slices.
-func BytesSlice(val, sep string) ([][]byte, error) {
-	s := strings.Split(val, sep)
-	values := make([][]byte, len(s))
-	for i, v := range s {
-		value, err := Bytes(v)
-		if err != nil {
-			return values, err
-		}
-		values[i] = value
-	}
-	return values, nil
-}
-
-// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
-func Timestamp(val string) (*timestamp.Timestamp, error) {
-	var r timestamp.Timestamp
-	err := jsonpb.UnmarshalString(val, &r)
-	if err != nil {
-		return nil, err
-	}
-	return &r, nil
-}
-
-// Duration converts the given string into a duration.Duration.
-func Duration(val string) (*duration.Duration, error) {
-	var r duration.Duration
-	err := jsonpb.UnmarshalString(val, &r)
-	if err != nil {
-		return nil, err
-	}
-	return &r, nil
-}
-
-// Enum converts the given string into an int32 that should be type cast into the
-// correct enum proto type.
-func Enum(val string, enumValMap map[string]int32) (int32, error) {
-	e, ok := enumValMap[val]
-	if ok {
-		return e, nil
-	}
-
-	i, err := Int32(val)
-	if err != nil {
-		return 0, fmt.Errorf("%s is not valid", val)
-	}
-	for _, v := range enumValMap {
-		if v == i {
-			return i, nil
-		}
-	}
-	return 0, fmt.Errorf("%s is not valid", val)
-}
-
-// EnumSlice converts 'val' where individual enums are separated by 'sep'
-// into an int32 slice. Each individual int32 should be type cast into the
-// correct enum proto type.
-func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
-	s := strings.Split(val, sep)
-	values := make([]int32, len(s))
-	for i, v := range s {
-		value, err := Enum(v, enumValMap)
-		if err != nil {
-			return values, err
-		}
-		values[i] = value
-	}
-	return values, nil
-}
-
-/*
-	Support for google.protobuf.wrappers on top of primitive types
-*/
-
-// StringValue well-known type support as wrapper around string type
-func StringValue(val string) (*wrappers.StringValue, error) {
-	return &wrappers.StringValue{Value: val}, nil
-}
-
-// FloatValue well-known type support as wrapper around float32 type
-func FloatValue(val string) (*wrappers.FloatValue, error) {
-	parsedVal, err := Float32(val)
-	return &wrappers.FloatValue{Value: parsedVal}, err
-}
-
-// DoubleValue well-known type support as wrapper around float64 type
-func DoubleValue(val string) (*wrappers.DoubleValue, error) {
-	parsedVal, err := Float64(val)
-	return &wrappers.DoubleValue{Value: parsedVal}, err
-}
-
-// BoolValue well-known type support as wrapper around bool type
-func BoolValue(val string) (*wrappers.BoolValue, error) {
-	parsedVal, err := Bool(val)
-	return &wrappers.BoolValue{Value: parsedVal}, err
-}
-
-// Int32Value well-known type support as wrapper around int32 type
-func Int32Value(val string) (*wrappers.Int32Value, error) {
-	parsedVal, err := Int32(val)
-	return &wrappers.Int32Value{Value: parsedVal}, err
-}
-
-// UInt32Value well-known type support as wrapper around uint32 type
-func UInt32Value(val string) (*wrappers.UInt32Value, error) {
-	parsedVal, err := Uint32(val)
-	return &wrappers.UInt32Value{Value: parsedVal}, err
-}
-
-// Int64Value well-known type support as wrapper around int64 type
-func Int64Value(val string) (*wrappers.Int64Value, error) {
-	parsedVal, err := Int64(val)
-	return &wrappers.Int64Value{Value: parsedVal}, err
-}
-
-// UInt64Value well-known type support as wrapper around uint64 type
-func UInt64Value(val string) (*wrappers.UInt64Value, error) {
-	parsedVal, err := Uint64(val)
-	return &wrappers.UInt64Value{Value: parsedVal}, err
-}
-
-// BytesValue well-known type support as wrapper around []byte type
-func BytesValue(val string) (*wrappers.BytesValue, error) {
-	parsedVal, err := Bytes(val)
-	return &wrappers.BytesValue{Value: parsedVal}, err
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
deleted file mode 100644
index b6e5ddf7a9f13..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
-Package runtime contains runtime helper functions used by
-servers which protoc-gen-grpc-gateway generates.
-*/
-package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
deleted file mode 100644
index b2ce743bddcd6..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package runtime
-
-import (
-	"context"
-	"io"
-	"net/http"
-	"strings"
-
-	"github.com/grpc-ecosystem/grpc-gateway/internal"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/status"
-)
-
-// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
-// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
-func HTTPStatusFromCode(code codes.Code) int {
-  switch code {
-  case codes.OK:
-    return http.StatusOK
-  case codes.Canceled:
-    return http.StatusRequestTimeout
-  case codes.Unknown:
-    return http.StatusInternalServerError
-  case codes.InvalidArgument:
-    return http.StatusBadRequest
-  case codes.DeadlineExceeded:
-    return http.StatusGatewayTimeout
-  case codes.NotFound:
-    return http.StatusNotFound
-  case codes.AlreadyExists:
-    return http.StatusConflict
-  case codes.PermissionDenied:
-    return http.StatusForbidden
-  case codes.Unauthenticated:
-    return http.StatusUnauthorized
-  case codes.ResourceExhausted:
-    return http.StatusTooManyRequests
-  case codes.FailedPrecondition:
-    // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
-    return http.StatusBadRequest
-  case codes.Aborted:
-    return http.StatusConflict
-  case codes.OutOfRange:
-    return http.StatusBadRequest
-  case codes.Unimplemented:
-    return http.StatusNotImplemented
-  case codes.Internal:
-    return http.StatusInternalServerError
-  case codes.Unavailable:
-    return http.StatusServiceUnavailable
-  case codes.DataLoss:
-    return http.StatusInternalServerError
-  }
-
-  grpclog.Infof("Unknown gRPC error code: %v", code)
-  return http.StatusInternalServerError
-}
-
-var (
-  // HTTPError replies to the request with an error.
-  //
-  // HTTPError is called:
-  //  - From generated per-endpoint gateway handler code, when calling the backend results in an error.
-  //  - From gateway runtime code, when forwarding the response message results in an error.
-  //
-  // The default value for HTTPError calls the custom error handler configured on the ServeMux via the
-  // WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise.
-  //
-  // To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler
-  // serve option.
-  //
-  // To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve
-  // option, set GlobalHTTPErrorHandler to a custom function.
-  //
-  // Setting this variable directly to customize error format is deprecated.
-  HTTPError = MuxOrGlobalHTTPError
-
-  // GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the
-  // WithProtoErrorHandler serve option.
-  //
-  // You can set a custom function to this variable to customize error format.
-  GlobalHTTPErrorHandler = DefaultHTTPError
-
-  // OtherErrorHandler handles gateway errors from parsing and routing client requests for all
-  // ServeMux instances not using the WithProtoErrorHandler serve option.
-  //
-  // It returns the following error codes: StatusMethodNotAllowed, StatusNotFound, and StatusBadRequest.
-  //
-  // To customize parsing and routing error handling of a particular ServeMux instance, use the
-  // WithProtoErrorHandler serve option.
-  //
-  // To customize parsing and routing error handling of all ServeMux instances not using the
-  // WithProtoErrorHandler serve option, set a custom function to this variable.
-  OtherErrorHandler = DefaultOtherErrorHandler
-)
-// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalHTTPErrorHandler.
-func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
-  if mux.protoErrorHandler != nil {
-    mux.protoErrorHandler(ctx, mux, marshaler, w, r, err)
-  } else {
-    GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err)
-  }
-}
-
-// DefaultHTTPError is the default implementation of HTTPError.
-// If "err" is an error from the gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
-// Otherwise, it replies with http.StatusInternalServerError.
-//
-// The response body returned by this function is a JSON object,
-// which contains a member whose key is "error" and whose value is err.Error().
-func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
-  const fallback = `{"error": "failed to marshal error message"}`
-
-  s, ok := status.FromError(err)
-  if !ok {
-    s = status.New(codes.Unknown, err.Error())
-  }
-
-  w.Header().Del("Trailer")
-  w.Header().Del("Transfer-Encoding")
-
-  contentType := marshaler.ContentType()
-  // Check marshaler at run time in order to keep backwards compatibility.
-  // An interface param needs to be added to the ContentType() function on
-  // the Marshal interface to be able to remove this check.
-  if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
-    pb := s.Proto()
-    contentType = typeMarshaler.ContentTypeFromMessage(pb)
-  }
-  w.Header().Set("Content-Type", contentType)
-
-  body := &internal.Error{
-    Error: s.Message(),
-    Message: s.Message(),
-    Code: int32(s.Code()),
-    Details: s.Proto().GetDetails(),
-  }
-
-  buf, merr := marshaler.Marshal(body)
-  if merr != nil {
-    grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
-    w.WriteHeader(http.StatusInternalServerError)
-    if _, err := io.WriteString(w, fallback); err != nil {
-      grpclog.Infof("Failed to write response: %v", err)
-    }
-    return
-  }
-
-  md, ok := ServerMetadataFromContext(ctx)
-  if !ok {
-    grpclog.Infof("Failed to extract ServerMetadata from context")
-  }
-
-  handleForwardResponseServerMetadata(w, mux, md)
-
-  // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
-  // Unless the request includes a TE header field indicating "trailers"
-  // is acceptable, as described in Section 4.3, a server SHOULD NOT
-  // generate trailer fields that it believes are necessary for the user
-  // agent to receive.
-  var wantsTrailers bool
-
-  if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") {
-    wantsTrailers = true
-    handleForwardResponseTrailerHeader(w, md)
-    w.Header().Set("Transfer-Encoding", "chunked")
-  }
-
-  st := HTTPStatusFromCode(s.Code())
-  w.WriteHeader(st)
-  if _, err := w.Write(buf); err != nil {
-    grpclog.Infof("Failed to write response: %v", err)
-  }
-
-  if wantsTrailers {
-    handleForwardResponseTrailer(w, md)
-  }
-}
-
-// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
-// It simply writes a string representation of the given error into "w".
-func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { - http.Error(w, msg, code) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go deleted file mode 100644 index aef645e40b92d..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go +++ /dev/null @@ -1,89 +0,0 @@ -package runtime - -import ( - "encoding/json" - "io" - "strings" - - descriptor2 "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/protoc-gen-go/descriptor" - "google.golang.org/genproto/protobuf/field_mask" -) - -func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) { - // TODO - should really gate this with a test that the marshaller has used json names - if md != nil { - for _, f := range md.Field { - if f.JsonName != nil && f.Name != nil && *f.JsonName == name { - var subType *descriptor.DescriptorProto - - // If the field has a TypeName then we retrieve the nested type for translating the embedded message names. - if f.TypeName != nil { - typeSplit := strings.Split(*f.TypeName, ".") - typeName := typeSplit[len(typeSplit)-1] - for _, t := range md.NestedType { - if typeName == *t.Name { - subType = t - } - } - } - return *f.Name, subType - } - } - } - return name, nil -} - -// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. -func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) { - fm := &field_mask.FieldMask{} - var root interface{} - if err := json.NewDecoder(r).Decode(&root); err != nil { - if err == io.EOF { - return fm, nil - } - return nil, err - } - - queue := []fieldMaskPathItem{{node: root, md: md}} - for len(queue) > 0 { - // dequeue an item - item := queue[0] - queue = queue[1:] - - if m, ok := item.node.(map[string]interface{}); ok { - // if the item is an object, then enqueue all of its children - for k, v := range m { - protoName, subMd := translateName(k, item.md) - if subMsg, ok := v.(descriptor2.Message); ok { - _, subMd = descriptor2.ForMessage(subMsg) - } - - var path string - if item.path == "" { - path = protoName - } else { - path = item.path + "." 
+ protoName
-        }
-        queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd})
-      }
-    } else if len(item.path) > 0 {
-      // otherwise, it's a leaf node so print its path
-      fm.Paths = append(fm.Paths, item.path)
-    }
-  }
-
-  return fm, nil
-}
-
-// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
-type fieldMaskPathItem struct {
-  // the list of prior fields leading up to node, connected by dots
-  path string
-
-  // a generically decoded JSON object; the current item to inspect for further path extraction
-  node interface{}
-
-  // descriptor for parent message
-  md *descriptor.DescriptorProto
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
deleted file mode 100644
index e6e8f286e1294..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package runtime
-
-import (
-  "context"
-  "errors"
-  "fmt"
-  "io"
-  "net/http"
-  "net/textproto"
-
-  "github.com/golang/protobuf/proto"
-  "github.com/grpc-ecosystem/grpc-gateway/internal"
-  "google.golang.org/grpc/grpclog"
-)
-
-var errEmptyResponse = errors.New("empty response")
-
-// ForwardResponseStream forwards the stream from the gRPC server to the REST client.
-func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
-  f, ok := w.(http.Flusher)
-  if !ok {
-    grpclog.Infof("Flush not supported in %T", w)
-    http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
-    return
-  }
-
-  md, ok := ServerMetadataFromContext(ctx)
-  if !ok {
-    grpclog.Infof("Failed to extract ServerMetadata from context")
-    http.Error(w, "unexpected error", http.StatusInternalServerError)
-    return
-  }
-  handleForwardResponseServerMetadata(w, mux, md)
-
-  w.Header().Set("Transfer-Encoding", "chunked")
-  w.Header().Set("Content-Type", marshaler.ContentType())
-  if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
-    HTTPError(ctx, mux, marshaler, w, req, err)
-    return
-  }
-
-  var delimiter []byte
-  if d, ok := marshaler.(Delimited); ok {
-    delimiter = d.Delimiter()
-  } else {
-    delimiter = []byte("\n")
-  }
-
-  var wroteHeader bool
-  for {
-    resp, err := recv()
-    if err == io.EOF {
-      return
-    }
-    if err != nil {
-      handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
-      return
-    }
-    if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
-      handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
-      return
-    }
-
-    var buf []byte
-    switch {
-    case resp == nil:
-      buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse)))
-    default:
-      result := map[string]interface{}{"result": resp}
-      if rb, ok := resp.(responseBody); ok {
-        result["result"] = rb.XXX_ResponseBody()
-      }
-
-      buf, err = marshaler.Marshal(result)
-    }
-
-    if err != nil {
-      grpclog.Infof("Failed to marshal response chunk: %v", err)
-      handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
-      return
-    }
-    if _, err = w.Write(buf); err != nil {
-      grpclog.Infof("Failed to send response chunk: %v", err)
-      return
-    }
-    wroteHeader = true
-    if _, err = w.Write(delimiter); err != nil {
-      grpclog.Infof("Failed to send delimiter chunk: %v", err)
-      return
-    }
-    f.Flush()
-  }
-}
-
-func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
-  for k, vs := range md.HeaderMD {
-    if h, ok := mux.outgoingHeaderMatcher(k); ok {
-      for _, v := range vs {
-        w.Header().Add(h, v)
-      }
-    }
-  }
-}
-
-func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
-  for k := range md.TrailerMD {
-    tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
-    w.Header().Add("Trailer", tKey)
-  }
-}
-
-func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
-  for k, vs := range md.TrailerMD {
-    tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
-    for _, v := range vs {
-      w.Header().Add(tKey, v)
-    }
-  }
-}
-
-// responseBody is an interface with a method for getting the field to marshal to the response body.
-// The method is generated for a response struct from the value of `response_body` in the `google.api.HttpRule`.
-type responseBody interface {
-  XXX_ResponseBody() interface{}
-}
-
-// ForwardResponseMessage forwards the message "resp" from the gRPC server to the REST client.
-func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
-  md, ok := ServerMetadataFromContext(ctx)
-  if !ok {
-    grpclog.Infof("Failed to extract ServerMetadata from context")
-  }
-
-  handleForwardResponseServerMetadata(w, mux, md)
-  handleForwardResponseTrailerHeader(w, md)
-
-  contentType := marshaler.ContentType()
-  // Check marshaler at run time in order to keep backwards compatibility.
-  // An interface param needs to be added to the ContentType() function on
-  // the Marshal interface to be able to remove this check.
-  if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
-    contentType = typeMarshaler.ContentTypeFromMessage(resp)
-  }
-  w.Header().Set("Content-Type", contentType)
-
-  if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
-    HTTPError(ctx, mux, marshaler, w, req, err)
-    return
-  }
-  var buf []byte
-  var err error
-  if rb, ok := resp.(responseBody); ok {
-    buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
-  } else {
-    buf, err = marshaler.Marshal(resp)
-  }
-  if err != nil {
-    grpclog.Infof("Marshal error: %v", err)
-    HTTPError(ctx, mux, marshaler, w, req, err)
-    return
-  }
-
-  if _, err = w.Write(buf); err != nil {
-    grpclog.Infof("Failed to write response: %v", err)
-  }
-
-  handleForwardResponseTrailer(w, md)
-}
-
-func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
-  if len(opts) == 0 {
-    return nil
-  }
-  for _, opt := range opts {
-    if err := opt(ctx, w, resp); err != nil {
-      grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
-      return err
-    }
-  }
-  return nil
-}
-
-func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
-  serr := streamError(ctx, mux.streamErrorHandler, err)
-  if !wroteHeader {
-    w.WriteHeader(int(serr.HttpCode))
-  }
-  buf, merr := marshaler.Marshal(errorChunk(serr))
-  if merr != nil {
-    grpclog.Infof("Failed to marshal an error: %v", merr)
-    return
-  }
-  if _, werr := w.Write(buf); werr != nil {
-    grpclog.Infof("Failed to notify error to client: %v", werr)
-    return
-  }
-}
-
-// streamError returns the payload for the final message in a response stream
-// that represents the given err.
-func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
-  serr := errHandler(ctx, err)
-  if serr != nil {
-    return serr
-  }
-  // TODO: log about misbehaving stream error handler?
-  return DefaultHTTPStreamErrorHandler(ctx, err)
-}
-
-func errorChunk(err *StreamError) map[string]proto.Message {
-  return map[string]proto.Message{"error": (*internal.StreamError)(err)}
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
deleted file mode 100644
index 525b0338c7471..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package runtime
-
-import (
-  "google.golang.org/genproto/googleapis/api/httpbody"
-)
-
-// SetHTTPBodyMarshaler overwrites the default marshaler with the HTTPBodyMarshaler.
-func SetHTTPBodyMarshaler(serveMux *ServeMux) {
-  serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
-    Marshaler: &JSONPb{OrigName: true},
-  }
-}
-
-// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
-// google.api.HttpBody message as the full response body if it is
-// the actual message used as the response. If not, then this will
-// simply fall back to the Marshaler specified as its default Marshaler.
-type HTTPBodyMarshaler struct {
-  Marshaler
-}
-
-// ContentType implementation to keep backwards compatibility with the marshal interface.
-func (h *HTTPBodyMarshaler) ContentType() string {
-  return h.ContentTypeFromMessage(nil)
-}
-
-// ContentTypeFromMessage returns the specified content type in case v is a
-// google.api.HttpBody message; otherwise it falls back to the default Marshaler.
-func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
-  if httpBody, ok := v.(*httpbody.HttpBody); ok {
-    return httpBody.GetContentType()
-  }
-  return h.Marshaler.ContentType()
-}
-
-// Marshal marshals "v" by returning the body bytes if v is a
-// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
-func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
-  if httpBody, ok := v.(*httpbody.HttpBody); ok {
-    return httpBody.Data, nil
-  }
-  return h.Marshaler.Marshal(v)
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
deleted file mode 100644
index f9d3a585a4c05..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package runtime
-
-import (
-  "encoding/json"
-  "io"
-)
-
-// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
-// with the standard "encoding/json" package of Go.
-// Although it is generally faster for simple proto messages than JSONPb,
-// it does not support advanced features of protobuf, e.g. map, oneof, ....
-//
-// The NewEncoder and NewDecoder types return *json.Encoder and
-// *json.Decoder respectively.
-type JSONBuiltin struct{}
-
-// ContentType always returns "application/json".
-func (*JSONBuiltin) ContentType() string {
-  return "application/json"
-}
-
-// Marshal marshals "v" into JSON.
-func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
-  return json.Marshal(v)
-}
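An aside on the HTTPBodyMarshaler deleted just above: wiring it into a mux let a handler return raw, arbitrarily typed payloads. A hedged sketch of typical usage (the HTML payload is invented for illustration):

    import (
        "github.com/grpc-ecosystem/grpc-gateway/runtime"
        "google.golang.org/genproto/googleapis/api/httpbody"
    )

    func newMux() *runtime.ServeMux {
        mux := runtime.NewServeMux()
        // Replace the wildcard marshaler so google.api.HttpBody responses
        // are written verbatim with their own content type.
        runtime.SetHTTPBodyMarshaler(mux)
        return mux
    }

    // A server method could then respond with, e.g.:
    //   return &httpbody.HttpBody{ContentType: "text/html", Data: []byte("<h1>ok</h1>")}, nil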
-// Unmarshal unmarshals JSON data into "v".
-func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
-  return json.Unmarshal(data, v)
-}
-
-// NewDecoder returns a Decoder which reads JSON stream from "r".
-func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
-  return json.NewDecoder(r)
-}
-
-// NewEncoder returns an Encoder which writes JSON stream into "w".
-func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
-  return json.NewEncoder(w)
-}
-
-// Delimiter for newline encoded JSON streams.
-func (j *JSONBuiltin) Delimiter() []byte {
-  return []byte("\n")
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
deleted file mode 100644
index f0de351b212dd..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package runtime
-
-import (
-  "bytes"
-  "encoding/json"
-  "fmt"
-  "io"
-  "reflect"
-
-  "github.com/golang/protobuf/jsonpb"
-  "github.com/golang/protobuf/proto"
-)
-
-// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
-// with the "github.com/golang/protobuf/jsonpb" package.
-// It supports the full functionality of protobuf, unlike JSONBuiltin.
-//
-// The NewDecoder method returns a DecoderWrapper, so the underlying
-// *json.Decoder methods can be used.
-type JSONPb jsonpb.Marshaler
-
-// ContentType always returns "application/json".
-func (*JSONPb) ContentType() string {
-  return "application/json"
-}
-
-// Marshal marshals "v" into JSON.
-func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
-  if _, ok := v.(proto.Message); !ok {
-    return j.marshalNonProtoField(v)
-  }
-
-  var buf bytes.Buffer
-  if err := j.marshalTo(&buf, v); err != nil {
-    return nil, err
-  }
-  return buf.Bytes(), nil
-}
-
-func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
-  p, ok := v.(proto.Message)
-  if !ok {
-    buf, err := j.marshalNonProtoField(v)
-    if err != nil {
-      return err
-    }
-    _, err = w.Write(buf)
-    return err
-  }
-  return (*jsonpb.Marshaler)(j).Marshal(w, p)
-}
-
-var (
-  // protoMessageType is stored to prevent constant lookup of the same type at runtime.
-  protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
-)
-
-// marshalNonProtoField marshals a non-message field of a protobuf message.
-// This function cannot correctly marshal arbitrary data structures into JSON;
-// it is only capable of marshaling non-message field values of protobuf,
-// i.e. primitive types, enums; pointers to primitives or enums; maps from
-// integer/string types to primitives/enums/pointers to messages.
-func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { - if v == nil { - return []byte("null"), nil - } - rv := reflect.ValueOf(v) - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return []byte("null"), nil - } - rv = rv.Elem() - } - - if rv.Kind() == reflect.Slice { - if rv.IsNil() { - if j.EmitDefaults { - return []byte("[]"), nil - } - return []byte("null"), nil - } - - if rv.Type().Elem().Implements(protoMessageType) { - var buf bytes.Buffer - err := buf.WriteByte('[') - if err != nil { - return nil, err - } - for i := 0; i < rv.Len(); i++ { - if i != 0 { - err = buf.WriteByte(',') - if err != nil { - return nil, err - } - } - if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { - return nil, err - } - } - err = buf.WriteByte(']') - if err != nil { - return nil, err - } - - return buf.Bytes(), nil - } - } - - if rv.Kind() == reflect.Map { - m := make(map[string]*json.RawMessage) - for _, k := range rv.MapKeys() { - buf, err := j.Marshal(rv.MapIndex(k).Interface()) - if err != nil { - return nil, err - } - m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) - } - if j.Indent != "" { - return json.MarshalIndent(m, "", j.Indent) - } - return json.Marshal(m) - } - if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { - return json.Marshal(enum.String()) - } - return json.Marshal(rv.Interface()) -} - -// Unmarshal unmarshals JSON "data" into "v" -func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { - return unmarshalJSONPb(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JSONPb) NewDecoder(r io.Reader) Decoder { - d := json.NewDecoder(r) - return DecoderWrapper{Decoder: d} -} - -// DecoderWrapper is a wrapper around a *json.Decoder that adds -// support for protos to the Decode method. -type DecoderWrapper struct { - *json.Decoder -} - -// Decode wraps the embedded decoder's Decode method to support -// protos using a jsonpb.Unmarshaler. -func (d DecoderWrapper) Decode(v interface{}) error { - return decodeJSONPb(d.Decoder, v) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". 
-func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
-  return EncoderFunc(func(v interface{}) error {
-    if err := j.marshalTo(w, v); err != nil {
-      return err
-    }
-    // mimic json.Encoder by adding a newline (makes output
-    // easier to read when it contains multiple encoded items)
-    _, err := w.Write(j.Delimiter())
-    return err
-  })
-}
-
-func unmarshalJSONPb(data []byte, v interface{}) error {
-  d := json.NewDecoder(bytes.NewReader(data))
-  return decodeJSONPb(d, v)
-}
-
-func decodeJSONPb(d *json.Decoder, v interface{}) error {
-  p, ok := v.(proto.Message)
-  if !ok {
-    return decodeNonProtoField(d, v)
-  }
-  unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
-  return unmarshaler.UnmarshalNext(d, p)
-}
-
-func decodeNonProtoField(d *json.Decoder, v interface{}) error {
-  rv := reflect.ValueOf(v)
-  if rv.Kind() != reflect.Ptr {
-    return fmt.Errorf("%T is not a pointer", v)
-  }
-  for rv.Kind() == reflect.Ptr {
-    if rv.IsNil() {
-      rv.Set(reflect.New(rv.Type().Elem()))
-    }
-    if rv.Type().ConvertibleTo(typeProtoMessage) {
-      unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
-      return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
-    }
-    rv = rv.Elem()
-  }
-  if rv.Kind() == reflect.Map {
-    if rv.IsNil() {
-      rv.Set(reflect.MakeMap(rv.Type()))
-    }
-    conv, ok := convFromType[rv.Type().Key().Kind()]
-    if !ok {
-      return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
-    }
-
-    m := make(map[string]*json.RawMessage)
-    if err := d.Decode(&m); err != nil {
-      return err
-    }
-    for k, v := range m {
-      result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
-      if err := result[1].Interface(); err != nil {
-        return err.(error)
-      }
-      bk := result[0]
-      bv := reflect.New(rv.Type().Elem())
-      if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
-        return err
-      }
-      rv.SetMapIndex(bk, bv.Elem())
-    }
-    return nil
-  }
-  if _, ok := rv.Interface().(protoEnum); ok {
-    var repr interface{}
-    if err := d.Decode(&repr); err != nil {
-      return err
-    }
-    switch repr.(type) {
-    case string:
-      // TODO(yugui) Should use proto.StructProperties?
-      return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
-    case float64:
-      rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
-      return nil
-    default:
-      return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
-    }
-  }
-  return d.Decode(v)
-}
-
-type protoEnum interface {
-  fmt.Stringer
-  EnumDescriptor() ([]byte, []int)
-}
-
-var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
-
-// Delimiter for newline encoded JSON streams.
-func (j *JSONPb) Delimiter() []byte {
-  return []byte("\n")
-}
-
-// allowUnknownFields, when true, suppresses the error that would otherwise be
-// returned when the destination is a struct and the input contains object keys
-// which do not match any non-ignored, exported fields in the destination.
-var allowUnknownFields = true
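Because JSONPb is simply a named jsonpb.Marshaler, its fields (OrigName, EmitDefaults, Indent, ...) can be set directly and the configured value registered for a MIME type. A minimal sketch, using the mux options that appear later in this diff:

    import "github.com/grpc-ecosystem/grpc-gateway/runtime"

    func newMux() *runtime.ServeMux {
        jpb := &runtime.JSONPb{
            OrigName:     true, // use proto field names rather than lowerCamelCase
            EmitDefaults: true, // include zero-valued fields in output
        }
        return runtime.NewServeMux(
            runtime.WithMarshalerOption(runtime.MIMEWildcard, jpb),
        )
    }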
-// DisallowUnknownFields enables an option in the decoder (unmarshaler) to
-// return an error when it finds an unknown field. This function must be
-// called before using the JSON marshaler.
-func DisallowUnknownFields() {
-  allowUnknownFields = false
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
deleted file mode 100644
index f65d1a2676b87..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package runtime
-
-import (
-  "io"
-
-  "errors"
-  "github.com/golang/protobuf/proto"
-  "io/ioutil"
-)
-
-// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes.
-type ProtoMarshaller struct{}
-
-// ContentType always returns "application/octet-stream".
-func (*ProtoMarshaller) ContentType() string {
-  return "application/octet-stream"
-}
-
-// Marshal marshals "value" into Proto.
-func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
-  message, ok := value.(proto.Message)
-  if !ok {
-    return nil, errors.New("unable to marshal non proto field")
-  }
-  return proto.Marshal(message)
-}
-
-// Unmarshal unmarshals proto "data" into "value".
-func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
-  message, ok := value.(proto.Message)
-  if !ok {
-    return errors.New("unable to unmarshal non proto field")
-  }
-  return proto.Unmarshal(data, message)
-}
-
-// NewDecoder returns a Decoder which reads a proto stream from "reader".
-func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
-  return DecoderFunc(func(value interface{}) error {
-    buffer, err := ioutil.ReadAll(reader)
-    if err != nil {
-      return err
-    }
-    return marshaller.Unmarshal(buffer, value)
-  })
-}
-
-// NewEncoder returns an Encoder which writes a proto stream into "writer".
-func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
-  return EncoderFunc(func(value interface{}) error {
-    buffer, err := marshaller.Marshal(value)
-    if err != nil {
-      return err
-    }
-    _, err = writer.Write(buffer)
-    if err != nil {
-      return err
-    }
-
-    return nil
-  })
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
deleted file mode 100644
index 46153294217fd..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package runtime
-
-import (
-  "io"
-)
-
-// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
-type Marshaler interface {
-  // Marshal marshals "v" into byte sequence.
-  Marshal(v interface{}) ([]byte, error)
-  // Unmarshal unmarshals "data" into "v".
-  // "v" must be a pointer value.
-  Unmarshal(data []byte, v interface{}) error
-  // NewDecoder returns a Decoder which reads byte sequence from "r".
-  NewDecoder(r io.Reader) Decoder
-  // NewEncoder returns an Encoder which writes byte sequence into "w".
-  NewEncoder(w io.Writer) Encoder
-  // ContentType returns the Content-Type which this marshaler is responsible for.
-  ContentType() string
-}
-
-// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called
-// to set the Content-Type header on the response.
-type contentTypeMarshaler interface {
-  // ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message.
-  ContentTypeFromMessage(v interface{}) string
-}
-
-// Decoder decodes a byte sequence.
-type Decoder interface {
-  Decode(v interface{}) error
-}
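The Marshaler interface above is the extension point for custom wire formats. A minimal sketch of an implementation that delegates to encoding/json under a vendor-specific content type (the type name and MIME type are illustrative, not part of the library):

    import (
        "encoding/json"
        "io"

        "github.com/grpc-ecosystem/grpc-gateway/runtime"
    )

    type vendorJSON struct{}

    func (vendorJSON) ContentType() string { return "application/vnd.example+json" }

    func (vendorJSON) Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) }

    func (vendorJSON) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }

    // *json.Decoder and *json.Encoder already satisfy the runtime
    // Decoder/Encoder interfaces, so we can return them directly.
    func (vendorJSON) NewDecoder(r io.Reader) runtime.Decoder { return json.NewDecoder(r) }

    func (vendorJSON) NewEncoder(w io.Writer) runtime.Encoder { return json.NewEncoder(w) }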
-// Encoder encodes gRPC payloads / fields into byte sequence.
-type Encoder interface {
-  Encode(v interface{}) error
-}
-
-// DecoderFunc adapts a decoder function into Decoder.
-type DecoderFunc func(v interface{}) error
-
-// Decode delegates invocations to the underlying function itself.
-func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
-
-// EncoderFunc adapts an encoder function into Encoder.
-type EncoderFunc func(v interface{}) error
-
-// Encode delegates invocations to the underlying function itself.
-func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
-
-// Delimited defines the streaming delimiter.
-type Delimited interface {
-  // Delimiter returns the record separator for the stream.
-  Delimiter() []byte
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
deleted file mode 100644
index 8dd5c24db4279..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package runtime
-
-import (
-  "errors"
-  "mime"
-  "net/http"
-
-  "google.golang.org/grpc/grpclog"
-)
-
-// MIMEWildcard is the fallback MIME type used for requests which do not match
-// a registered MIME type.
-const MIMEWildcard = "*"
-
-var (
-  acceptHeader = http.CanonicalHeaderKey("Accept")
-  contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
-
-  defaultMarshaler = &JSONPb{OrigName: true}
-)
-
-// MarshalerForRequest returns the inbound/outbound marshalers for this request.
-// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
-// If it isn't set (or the request Content-Type is empty), it checks for "*".
-// If there are multiple Content-Type headers set, it chooses the first one that it can
-// exactly match in the registry.
-// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
-func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
-  for _, acceptVal := range r.Header[acceptHeader] {
-    if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
-      outbound = m
-      break
-    }
-  }
-
-  for _, contentTypeVal := range r.Header[contentTypeHeader] {
-    contentType, _, err := mime.ParseMediaType(contentTypeVal)
-    if err != nil {
-      grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
-      continue
-    }
-    if m, ok := mux.marshalers.mimeMap[contentType]; ok {
-      inbound = m
-      break
-    }
-  }
-
-  if inbound == nil {
-    inbound = mux.marshalers.mimeMap[MIMEWildcard]
-  }
-  if outbound == nil {
-    outbound = inbound
-  }
-
-  return inbound, outbound
-}
-
-// marshalerRegistry is a mapping from MIME types to Marshalers.
-type marshalerRegistry struct {
-  mimeMap map[string]Marshaler
-}
-
-// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
-// MIME type).
-func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
-  if len(mime) == 0 {
-    return errors.New("empty MIME type")
-  }
-
-  m.mimeMap[mime] = marshaler
-
-  return nil
-}
-
-// makeMarshalerMIMERegistry returns a new registry of marshalers.
-// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
-//
-// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
-// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
-// with a "application/json" Content-Type.
-// "*" can be used to match any Content-Type.
-// This can be attached to a ServeMux with the marshaler option.
-func makeMarshalerMIMERegistry() marshalerRegistry {
-  return marshalerRegistry{
-    mimeMap: map[string]Marshaler{
-      MIMEWildcard: defaultMarshaler,
-    },
-  }
-}
-
-// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
-// Marshalers to a MIME type in mux.
-func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
-  return func(mux *ServeMux) {
-    if err := mux.marshalers.add(mime, marshaler); err != nil {
-      panic(err)
-    }
-  }
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
deleted file mode 100644
index 523a9cb43c930..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package runtime
-
-import (
-  "context"
-  "fmt"
-  "net/http"
-  "net/textproto"
-  "strings"
-
-  "github.com/golang/protobuf/proto"
-  "google.golang.org/grpc/codes"
-  "google.golang.org/grpc/metadata"
-  "google.golang.org/grpc/status"
-)
-
-// A HandlerFunc handles a specific pair of path pattern and HTTP method.
-type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
-
-// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
-// a request is received with a URI path that does not match any registered
-// service method.
-//
-// Since gRPC servers return an "Unimplemented" code for requests with an
-// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
-var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
-
-// ServeMux is a request multiplexer for grpc-gateway.
-// It matches http requests to patterns and invokes the corresponding handler.
-type ServeMux struct {
-  // handlers maps HTTP method to a list of handlers.
-  handlers map[string][]handler
-  forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
-  marshalers marshalerRegistry
-  incomingHeaderMatcher HeaderMatcherFunc
-  outgoingHeaderMatcher HeaderMatcherFunc
-  metadataAnnotators []func(context.Context, *http.Request) metadata.MD
-  streamErrorHandler StreamErrorHandlerFunc
-  protoErrorHandler ProtoErrorHandlerFunc
-  disablePathLengthFallback bool
-  lastMatchWins bool
-}
-
-// ServeMuxOption is an option that can be given to a ServeMux on construction.
-type ServeMuxOption func(*ServeMux)
-
-// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
-//
-// forwardResponseOption is an option that will be called on the relevant context.Context,
-// http.ResponseWriter, and proto.Message before every forwarded response.
-//
-// The message may be nil in the case where just a header is being sent.
-func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
-  return func(serveMux *ServeMux) {
-    serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
-  }
-}
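WithForwardResponseOption above is the hook for mutating every forwarded response. One conventional use was stamping headers before the body is written; a hedged sketch (the header choice is illustrative):

    import (
        "context"
        "net/http"

        "github.com/golang/protobuf/proto"
        "github.com/grpc-ecosystem/grpc-gateway/runtime"
    )

    func withNoCache() runtime.ServeMuxOption {
        return runtime.WithForwardResponseOption(
            func(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
                // Runs before each response; the message may be nil for header-only sends.
                w.Header().Set("Cache-Control", "no-store")
                return nil
            },
        )
    }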
-// SetQueryParameterParser sets the query parameter parser, used to populate a message from query parameters.
-// Configuring this will mean the generated swagger output is no longer correct, and it should be
-// done with careful consideration.
-func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
-  return func(serveMux *ServeMux) {
-    currentQueryParser = queryParameterParser
-  }
-}
-
-// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
-type HeaderMatcherFunc func(string) (string, bool)
-
-// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
-// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
-// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
-func DefaultHeaderMatcher(key string) (string, bool) {
-  key = textproto.CanonicalMIMEHeaderKey(key)
-  if isPermanentHTTPHeader(key) {
-    return MetadataPrefix + key, true
-  } else if strings.HasPrefix(key, MetadataHeaderPrefix) {
-    return key[len(MetadataHeaderPrefix):], true
-  }
-  return "", false
-}
-
-// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming requests to the gateway.
-//
-// This matcher will be called with each header in http.Request. If the matcher returns true, that header will be
-// passed to the gRPC context. To transform the header before passing it to the gRPC context, the matcher should return the modified header.
-func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
-  return func(mux *ServeMux) {
-    mux.incomingHeaderMatcher = fn
-  }
-}
-
-// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing responses from the gateway.
-//
-// This matcher will be called with each header in the response header metadata. If the matcher returns true, that header will be
-// passed to the http response returned from the gateway. To transform the header before passing it to the response,
-// the matcher should return the modified header.
-func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
-  return func(mux *ServeMux) {
-    mux.outgoingHeaderMatcher = fn
-  }
-}
-
-// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
-//
-// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
-// is reading a token from a cookie and adding it to the gRPC context.
-func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
-  return func(serveMux *ServeMux) {
-    serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
-  }
-}
-
-// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
-//
-// This can be used to handle an error as a general proto message defined by gRPC.
-// When this option is used, the mux uses the configured error handler instead of HTTPError and
-// OtherErrorHandler.
-func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
-  return func(serveMux *ServeMux) {
-    serveMux.protoErrorHandler = fn
-  }
-}
-
-// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
-func WithDisablePathLengthFallback() ServeMuxOption {
-  return func(serveMux *ServeMux) {
-    serveMux.disablePathLengthFallback = true
-  }
-}
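The matcher and metadata options above compose at construction time. A sketch of a mux that forwards a custom request header into gRPC metadata alongside the defaults (the header and metadata key names are illustrative):

    import (
        "context"
        "net/http"

        "github.com/grpc-ecosystem/grpc-gateway/runtime"
        "google.golang.org/grpc/metadata"
    )

    func newMux() *runtime.ServeMux {
        return runtime.NewServeMux(
            runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
                if key == "X-Request-Id" {
                    return key, true // forward as-is
                }
                return runtime.DefaultHeaderMatcher(key) // keep default behavior
            }),
            runtime.WithMetadata(func(ctx context.Context, r *http.Request) metadata.MD {
                // Annotate every call with the original route, e.g. for logging.
                return metadata.Pairs("http-path", r.URL.Path)
            }),
        )
    }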
-// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
-// error handler, which allows for customizing the error trailer for server-streaming
-// calls.
-//
-// For stream errors that occur before any response has been written, the mux's
-// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
-// be handled differently: they must be included in the response body. The response body's
-// final message will include the error details returned by the stream error handler.
-func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
-  return func(serveMux *ServeMux) {
-    serveMux.streamErrorHandler = fn
-  }
-}
-
-// WithLastMatchWins returns a ServeMuxOption that will enable "last
-// match wins" behavior, where if multiple path patterns match a
-// request path, the last one defined in the .proto file will be used.
-func WithLastMatchWins() ServeMuxOption {
-  return func(serveMux *ServeMux) {
-    serveMux.lastMatchWins = true
-  }
-}
-
-// NewServeMux returns a new ServeMux whose internal mapping is empty.
-func NewServeMux(opts ...ServeMuxOption) *ServeMux {
-  serveMux := &ServeMux{
-    handlers: make(map[string][]handler),
-    forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
-    marshalers: makeMarshalerMIMERegistry(),
-    streamErrorHandler: DefaultHTTPStreamErrorHandler,
-  }
-
-  for _, opt := range opts {
-    opt(serveMux)
-  }
-
-  if serveMux.incomingHeaderMatcher == nil {
-    serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
-  }
-
-  if serveMux.outgoingHeaderMatcher == nil {
-    serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
-      return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
-    }
-  }
-
-  return serveMux
-}
-
-// Handle associates "h" with the pair of HTTP method and path pattern.
-func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
-  if s.lastMatchWins {
-    s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...)
-  } else {
-    s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
-  }
-}
-
-// ServeHTTP dispatches the request to the first handler whose pattern matches r.Method and r.Path.
-func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - path := r.URL.Path - if !strings.HasPrefix(path, "/") { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) - } - return - } - - components := strings.Split(path[1:], "/") - l := len(components) - var verb string - if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } - return - } else if idx > 0 { - c := components[l-1] - components[l-1], verb = c[:idx], c[idx+1:] - } - - if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { - r.Method = strings.ToUpper(override) - if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } - return - } - } - for _, h := range s.handlers[r.Method] { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - h.h(w, r, pathParams) - return - } - - // lookup other methods to handle fallback from GET to POST and - // to determine if it is MethodNotAllowed or NotFound. - for m, handlers := range s.handlers { - if m == r.Method { - continue - } - for _, h := range handlers { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - // X-HTTP-Method-Override is optional. Always allow fallback to POST. - if s.isPathLengthFallback(r) { - if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } - return - } - h.h(w, r, pathParams) - return - } - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) - } - return - } - } - - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } -} - -// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
-func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
-  return s.forwardResponseOptions
-}
-
-func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
-  return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
-}
-
-type handler struct {
-  pat Pattern
-  h HandlerFunc
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
deleted file mode 100644
index 09053695da7eb..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package runtime
-
-import (
-  "errors"
-  "fmt"
-  "strings"
-
-  "github.com/grpc-ecosystem/grpc-gateway/utilities"
-  "google.golang.org/grpc/grpclog"
-)
-
-var (
-  // ErrNotMatch indicates that the given HTTP request path does not match the pattern.
-  ErrNotMatch = errors.New("not match to the path pattern")
-  // ErrInvalidPattern indicates that the given definition of Pattern is not valid.
-  ErrInvalidPattern = errors.New("invalid pattern")
-)
-
-type op struct {
-  code utilities.OpCode
-  operand int
-}
-
-// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
-type Pattern struct {
-  // ops is a list of operations
-  ops []op
-  // pool is a constant pool indexed by the operands or vars.
-  pool []string
-  // vars is a list of variable names to be bound by this pattern
-  vars []string
-  // stacksize is the max depth of the stack
-  stacksize int
-  // tailLen is the length of the fixed-size segments after a deep wildcard
-  tailLen int
-  // verb is the VERB part of the path pattern. It is empty if the pattern does not have a VERB part.
-  verb string
-  // assumeColonVerb indicates whether a path suffix after a final
-  // colon may only be interpreted as a verb.
-  assumeColonVerb bool
-}
-
-type patternOptions struct {
-  assumeColonVerb bool
-}
-
-// PatternOpt is an option for creating Patterns.
-type PatternOpt func(*patternOptions)
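Patterns are rarely constructed by hand; protoc-gen-grpc-gateway emits NewPattern calls from the compiled HTTP rules. Still, the opcode machine documented below can be exercised directly. A hedged sketch for a route shaped like /v1/users/{id}, using the opcode names from the utilities package (the exact opcode sequence is my reconstruction of what generated code looks like, not copied from this diff):

    import (
        "fmt"

        "github.com/grpc-ecosystem/grpc-gateway/runtime"
        "github.com/grpc-ecosystem/grpc-gateway/utilities"
    )

    func example() {
        pat := runtime.MustPattern(runtime.NewPattern(
            1, // version
            []int{
                int(utilities.OpLitPush), 0, // literal pool[0] == "v1"
                int(utilities.OpLitPush), 1, // literal pool[1] == "users"
                int(utilities.OpPush), 0,    // one arbitrary segment
                int(utilities.OpConcatN), 1, // join the last 1 segment(s)
                int(utilities.OpCapture), 2, // bind the result to pool[2] == "id"
            },
            []string{"v1", "users", "id"}, // constant pool
            "",                            // no verb
        ))

        bindings, err := pat.Match([]string{"v1", "users", "alice"}, "")
        fmt.Println(bindings, err) // map[id:alice] <nil>
    }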
-// NewPattern returns a new Pattern from the given definition values.
-// "ops" is a sequence of op codes. "pool" is a constant pool.
-// "verb" is the verb part of the pattern. It is empty if the pattern does not have that part.
-// "version" must be 1 for now.
-// It returns an error if the given definition is invalid.
-func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) {
-  options := patternOptions{
-    assumeColonVerb: true,
-  }
-  for _, o := range opts {
-    o(&options)
-  }
-
-  if version != 1 {
-    grpclog.Infof("unsupported version: %d", version)
-    return Pattern{}, ErrInvalidPattern
-  }
-
-  l := len(ops)
-  if l%2 != 0 {
-    grpclog.Infof("odd number of ops codes: %d", l)
-    return Pattern{}, ErrInvalidPattern
-  }
-
-  var (
-    typedOps []op
-    stack, maxstack int
-    tailLen int
-    pushMSeen bool
-    vars []string
-  )
-  for i := 0; i < l; i += 2 {
-    op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
-    switch op.code {
-    case utilities.OpNop:
-      continue
-    case utilities.OpPush:
-      if pushMSeen {
-        tailLen++
-      }
-      stack++
-    case utilities.OpPushM:
-      if pushMSeen {
-        grpclog.Infof("pushM appears twice")
-        return Pattern{}, ErrInvalidPattern
-      }
-      pushMSeen = true
-      stack++
-    case utilities.OpLitPush:
-      if op.operand < 0 || len(pool) <= op.operand {
-        grpclog.Infof("negative literal index: %d", op.operand)
-        return Pattern{}, ErrInvalidPattern
-      }
-      if pushMSeen {
-        tailLen++
-      }
-      stack++
-    case utilities.OpConcatN:
-      if op.operand <= 0 {
-        grpclog.Infof("negative concat size: %d", op.operand)
-        return Pattern{}, ErrInvalidPattern
-      }
-      stack -= op.operand
-      if stack < 0 {
-        grpclog.Print("stack underflow")
-        return Pattern{}, ErrInvalidPattern
-      }
-      stack++
-    case utilities.OpCapture:
-      if op.operand < 0 || len(pool) <= op.operand {
-        grpclog.Infof("variable name index out of bound: %d", op.operand)
-        return Pattern{}, ErrInvalidPattern
-      }
-      v := pool[op.operand]
-      op.operand = len(vars)
-      vars = append(vars, v)
-      stack--
-      if stack < 0 {
-        grpclog.Infof("stack underflow")
-        return Pattern{}, ErrInvalidPattern
-      }
-    default:
-      grpclog.Infof("invalid opcode: %d", op.code)
-      return Pattern{}, ErrInvalidPattern
-    }
-
-    if maxstack < stack {
-      maxstack = stack
-    }
-    typedOps = append(typedOps, op)
-  }
-  return Pattern{
-    ops: typedOps,
-    pool: pool,
-    vars: vars,
-    stacksize: maxstack,
-    tailLen: tailLen,
-    verb: verb,
-    assumeColonVerb: options.assumeColonVerb,
-  }, nil
-}
-
-// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
-func MustPattern(p Pattern, err error) Pattern {
-  if err != nil {
-    grpclog.Fatalf("Pattern initialization failed: %v", err)
-  }
-  return p
-}
-
-// Match examines components to determine if they match the Pattern.
-// If they match, the function returns a mapping from field paths to their captured values.
-// Otherwise, the function returns an error.
-func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
-  if p.verb != verb {
-    if p.assumeColonVerb || p.verb != "" {
-      return nil, ErrNotMatch
-    }
-    if len(components) == 0 {
-      components = []string{":" + verb}
-    } else {
-      components = append([]string{}, components...)
- components[len(components)-1] += ":" + verb - } - verb = "" - } - - var pos int - stack := make([]string, 0, p.stacksize) - captured := make([]string, len(p.vars)) - l := len(components) - for _, op := range p.ops { - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush, utilities.OpLitPush: - if pos >= l { - return nil, ErrNotMatch - } - c := components[pos] - if op.code == utilities.OpLitPush { - if lit := p.pool[op.operand]; c != lit { - return nil, ErrNotMatch - } - } - stack = append(stack, c) - pos++ - case utilities.OpPushM: - end := len(components) - if end < pos+p.tailLen { - return nil, ErrNotMatch - } - end -= p.tailLen - stack = append(stack, strings.Join(components[pos:end], "/")) - pos = end - case utilities.OpConcatN: - n := op.operand - l := len(stack) - n - stack = append(stack[:l], strings.Join(stack[l:], "/")) - case utilities.OpCapture: - n := len(stack) - 1 - captured[op.operand] = stack[n] - stack = stack[:n] - } - } - if pos < l { - return nil, ErrNotMatch - } - bindings := make(map[string]string) - for i, val := range captured { - bindings[p.vars[i]] = val - } - return bindings, nil -} - -// Verb returns the verb part of the Pattern. -func (p Pattern) Verb() string { return p.verb } - -func (p Pattern) String() string { - var stack []string - for _, op := range p.ops { - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush: - stack = append(stack, "*") - case utilities.OpLitPush: - stack = append(stack, p.pool[op.operand]) - case utilities.OpPushM: - stack = append(stack, "**") - case utilities.OpConcatN: - n := op.operand - l := len(stack) - n - stack = append(stack[:l], strings.Join(stack[l:], "/")) - case utilities.OpCapture: - n := len(stack) - 1 - stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) - } - } - segs := strings.Join(stack, "/") - if p.verb != "" { - return fmt.Sprintf("/%s:%s", segs, p.verb) - } - return "/" + segs -} - -// AssumeColonVerbOpt indicates whether a path suffix after a final -// colon may only be interpreted as a verb. -func AssumeColonVerbOpt(val bool) PatternOpt { - return PatternOpt(func(o *patternOptions) { - o.assumeColonVerb = val - }) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go deleted file mode 100644 index a3151e2a5528d..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go +++ /dev/null @@ -1,80 +0,0 @@ -package runtime - -import ( - "github.com/golang/protobuf/proto" -) - -// StringP returns a pointer to a string whose pointee is same as the given string value. -func StringP(val string) (*string, error) { - return proto.String(val), nil -} - -// BoolP parses the given string representation of a boolean value, -// and returns a pointer to a bool whose value is same as the parsed value. -func BoolP(val string) (*bool, error) { - b, err := Bool(val) - if err != nil { - return nil, err - } - return proto.Bool(b), nil -} - -// Float64P parses the given string representation of a floating point number, -// and returns a pointer to a float64 whose value is same as the parsed number. -func Float64P(val string) (*float64, error) { - f, err := Float64(val) - if err != nil { - return nil, err - } - return proto.Float64(f), nil -} - -// Float32P parses the given string representation of a floating point number, -// and returns a pointer to a float32 whose value is same as the parsed number. 
-func Float32P(val string) (*float32, error) {
-  f, err := Float32(val)
-  if err != nil {
-    return nil, err
-  }
-  return proto.Float32(f), nil
-}
-
-// Int64P parses the given string representation of an integer
-// and returns a pointer to an int64 whose value is the same as the parsed integer.
-func Int64P(val string) (*int64, error) {
-  i, err := Int64(val)
-  if err != nil {
-    return nil, err
-  }
-  return proto.Int64(i), nil
-}
-
-// Int32P parses the given string representation of an integer
-// and returns a pointer to an int32 whose value is the same as the parsed integer.
-func Int32P(val string) (*int32, error) {
-  i, err := Int32(val)
-  if err != nil {
-    return nil, err
-  }
-  return proto.Int32(i), err
-}
-
-// Uint64P parses the given string representation of an integer
-// and returns a pointer to a uint64 whose value is the same as the parsed integer.
-func Uint64P(val string) (*uint64, error) {
-  i, err := Uint64(val)
-  if err != nil {
-    return nil, err
-  }
-  return proto.Uint64(i), err
-}
-
-// Uint32P parses the given string representation of an integer
-// and returns a pointer to a uint32 whose value is the same as the parsed integer.
-func Uint32P(val string) (*uint32, error) {
-  i, err := Uint32(val)
-  if err != nil {
-    return nil, err
-  }
-  return proto.Uint32(i), err
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
deleted file mode 100644
index 3fd30da22a70d..0000000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package runtime
-
-import (
-  "context"
-  "io"
-  "net/http"
-
-  "github.com/golang/protobuf/ptypes/any"
-  "github.com/grpc-ecosystem/grpc-gateway/internal"
-  "google.golang.org/grpc/codes"
-  "google.golang.org/grpc/grpclog"
-  "google.golang.org/grpc/status"
-)
-
-// StreamErrorHandlerFunc accepts an error as a gRPC error generated via the status package and translates it into
-// a proto struct used to represent an error at the end of a stream.
-type StreamErrorHandlerFunc func(context.Context, error) *StreamError
-
-// StreamError is the payload for the final message in a server stream in the event that the server returns an
-// error after a response message has already been sent.
-type StreamError internal.StreamError
-
-// ProtoErrorHandlerFunc handles the error as a gRPC error generated via the status package and replies to the request.
-type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
-
-var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
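The StreamErrorHandlerFunc hook defined above determines the final chunk of a broken server stream. A hedged sketch of a handler that hides internal error messages while deferring everything else to the default, wired with the WithStreamErrorHandler option from mux.go (the sanitizing policy is illustrative):

    import (
        "context"

        "github.com/grpc-ecosystem/grpc-gateway/runtime"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func sanitizedStreamError(ctx context.Context, err error) *runtime.StreamError {
        serr := runtime.DefaultHTTPStreamErrorHandler(ctx, err)
        if s, ok := status.FromError(err); ok && s.Code() == codes.Internal {
            serr.Message = "internal error" // do not leak internals to clients
            serr.Details = nil
        }
        return serr
    }

    // Wiring: runtime.NewServeMux(runtime.WithStreamErrorHandler(sanitizedStreamError))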
-func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - // return Internal when Marshal failed - const fallback = `{"code": 13, "message": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatibility - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { - pb := s.Proto() - contentType = typeMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - buf, merr := marshaler.Marshal(s.Proto()) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via -// default logic. -// -// It extracts the gRPC status from err if possible. The fields of the status are -// used to populate the returned StreamError, and the HTTP status code is derived -// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a -// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code. 
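As the doc comments above note, these defaults are meant to be installed through ServeMux options rather than assigned to the HTTPError variable directly. A minimal sketch against the v1 runtime API:

mux := runtime.NewServeMux(
	// Unary errors: reply with a marshaled google.rpc.Status body.
	runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
	// Streaming errors: emit a trailing StreamError message if the stream fails mid-flight.
	runtime.WithStreamErrorHandler(runtime.DefaultHTTPStreamErrorHandler),
)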
-func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError { - grpcCode := codes.Unknown - grpcMessage := err.Error() - var grpcDetails []*any.Any - if s, ok := status.FromError(err); ok { - grpcCode = s.Code() - grpcMessage = s.Message() - grpcDetails = s.Proto().GetDetails() - } - httpCode := HTTPStatusFromCode(grpcCode) - return &StreamError{ - GrpcCode: int32(grpcCode), - HttpCode: int32(httpCode), - Message: grpcMessage, - HttpStatus: http.StatusText(httpCode), - Details: grpcDetails, - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go deleted file mode 100644 index ba66842c33090..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ /dev/null @@ -1,406 +0,0 @@ -package runtime - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$") - -var currentQueryParser QueryParameterParser = &defaultQueryParser{} - -// QueryParameterParser defines interface for all query parameter parsers -type QueryParameterParser interface { - Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error -} - -// PopulateQueryParameters parses query parameters -// into "msg" using current query parser -func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { - return currentQueryParser.Parse(msg, values, filter) -} - -type defaultQueryParser struct{} - -// Parse populates "values" into "msg". -// A value is ignored if its key starts with one of the elements in "filter". -func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { - for key, values := range values { - match := valuesKeyRegexp.FindStringSubmatch(key) - if len(match) == 3 { - key = match[1] - values = append([]string{match[2]}, values...) - } - fieldPath := strings.Split(key, ".") - if filter.HasCommonPrefix(fieldPath) { - continue - } - if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { - return err - } - } - return nil -} - -// PopulateFieldFromPath sets a value in a nested Protobuf structure. -// It instantiates missing protobuf fields as it goes. 
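Two details of Parse above are easy to miss: a key shaped like m[sub] is rewritten by valuesKeyRegexp so that sub travels as the first element of the value list (populateMapField later pairs it with the actual value), and any field path with a prefix registered in the filter trie is skipped because it was already bound from the URL template. A small runnable illustration of the key rewriting, with hypothetical field names:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	vals, _ := url.ParseQuery("labels[env]=prod&user.name=alice")
	fmt.Println(vals["labels[env]"]) // [prod]
	// Parse sees the key "labels[env]", matches ^(.*)\[(.*)\]$, and proceeds as if
	// the key were "labels" with values ["env", "prod"] — the {key, value} pair
	// that populateMapField expects for a protobuf map field.
	// "user.name" is split on "." and walked one field at a time,
	// instantiating nil intermediate messages as it goes.
}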
-func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { - fieldPath := strings.Split(fieldPathString, ".") - return populateFieldValueFromPath(msg, fieldPath, []string{value}) -} - -func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { - m := reflect.ValueOf(msg) - if m.Kind() != reflect.Ptr { - return fmt.Errorf("unexpected type %T: %v", msg, msg) - } - var props *proto.Properties - m = m.Elem() - for i, fieldName := range fieldPath { - isLast := i == len(fieldPath)-1 - if !isLast && m.Kind() != reflect.Struct { - return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) - } - var f reflect.Value - var err error - f, props, err = fieldByProtoName(m, fieldName) - if err != nil { - return err - } else if !f.IsValid() { - grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) - return nil - } - - switch f.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - m = f - case reflect.Slice: - if !isLast { - return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) - } - // Handle []byte - if f.Type().Elem().Kind() == reflect.Uint8 { - m = f - break - } - return populateRepeatedField(f, values, props) - case reflect.Ptr: - if f.IsNil() { - m = reflect.New(f.Type().Elem()) - f.Set(m.Convert(f.Type())) - } - m = f.Elem() - continue - case reflect.Struct: - m = f - continue - case reflect.Map: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - return populateMapField(f, values, props) - default: - return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) - } - } - switch len(values) { - case 0: - return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) - case 1: - default: - grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) - } - return populateField(m, values[0], props) -} - -// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". -// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { - props := proto.GetProperties(m.Type()) - - // look up field name in oneof map - for _, op := range props.OneofTypes { - if name == op.Prop.OrigName || name == op.Prop.JSONName { - v := reflect.New(op.Type.Elem()) - field := m.Field(op.Field) - if !field.IsNil() { - return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) - } - field.Set(v) - return v.Elem().Field(0), op.Prop, nil - } - } - - for _, p := range props.Prop { - if p.OrigName == name { - return m.FieldByName(p.Name), p, nil - } - if p.JSONName == name { - return m.FieldByName(p.Name), p, nil - } - } - return reflect.Value{}, nil, nil -} - -func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { - if len(values) != 2 { - return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) - } - - key, value := values[0], values[1] - keyType := f.Type().Key() - valueType := f.Type().Elem() - if f.IsNil() { - f.Set(reflect.MakeMap(f.Type())) - } - - keyConv, ok := convFromType[keyType.Kind()] - if !ok { - return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) - } - valueConv, ok := convFromType[valueType.Kind()] - if !ok { - return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) - } - - keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) - if err := keyV[1].Interface(); err != nil { - return err.(error) - } - valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := valueV[1].Interface(); err != nil { - return err.(error) - } - - f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) - - return nil -} - -func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { - elemType := f.Type().Elem() - - // is the destination field a slice of an enumeration type? - if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnumRepeated(f, values, enumValMap) - } - - conv, ok := convFromType[elemType.Kind()] - if !ok { - return fmt.Errorf("unsupported field type %s", elemType) - } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Index(i).Set(result[0].Convert(f.Index(i).Type())) - } - return nil -} - -func populateField(f reflect.Value, value string, props *proto.Properties) error { - i := f.Addr().Interface() - - // Handle protobuf well known types - var name string - switch m := i.(type) { - case interface{ XXX_WellKnownType() string }: - name = m.XXX_WellKnownType() - case proto.Message: - const wktPrefix = "google.protobuf." 
- if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) { - name = fullName[len(wktPrefix):] - } - } - switch name { - case "Timestamp": - if value == "null" { - f.FieldByName("Seconds").SetInt(0) - f.FieldByName("Nanos").SetInt(0) - return nil - } - - t, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - f.FieldByName("Seconds").SetInt(int64(t.Unix())) - f.FieldByName("Nanos").SetInt(int64(t.Nanosecond())) - return nil - case "Duration": - if value == "null" { - f.FieldByName("Seconds").SetInt(0) - f.FieldByName("Nanos").SetInt(0) - return nil - } - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - f.FieldByName("Seconds").SetInt(s) - f.FieldByName("Nanos").SetInt(ns) - return nil - case "DoubleValue": - fallthrough - case "FloatValue": - float64Val, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.FieldByName("Value").SetFloat(float64Val) - return nil - case "Int64Value": - fallthrough - case "Int32Value": - int64Val, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.FieldByName("Value").SetInt(int64Val) - return nil - case "UInt64Value": - fallthrough - case "UInt32Value": - uint64Val, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.FieldByName("Value").SetUint(uint64Val) - return nil - case "BoolValue": - if value == "true" { - f.FieldByName("Value").SetBool(true) - } else if value == "false" { - f.FieldByName("Value").SetBool(false) - } else { - return fmt.Errorf("bad BoolValue: %s", value) - } - return nil - case "StringValue": - f.FieldByName("Value").SetString(value) - return nil - case "BytesValue": - bytesVal, err := base64.StdEncoding.DecodeString(value) - if err != nil { - return fmt.Errorf("bad BytesValue: %s", value) - } - f.FieldByName("Value").SetBytes(bytesVal) - return nil - case "FieldMask": - p := f.FieldByName("Paths") - for _, v := range strings.Split(value, ",") { - if v != "" { - p.Set(reflect.Append(p, reflect.ValueOf(v))) - } - } - return nil - } - - // Handle Time and Duration stdlib types - switch t := i.(type) { - case *time.Time: - pt, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - *t = pt - return nil - case *time.Duration: - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - *t = d - return nil - } - - // is the destination field an enumeration type? 
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnum(f, value, enumValMap) - } - - conv, ok := convFromType[f.Kind()] - if !ok { - return fmt.Errorf("field type %T is not supported in query parameters", i) - } - result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Set(result[0].Convert(f.Type())) - return nil -} - -func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { - // see if it's an enumeration string - if enumVal, ok := enumValMap[value]; ok { - return reflect.ValueOf(enumVal).Convert(t), nil - } - - // check for an integer that matches an enumeration value - eVal, err := strconv.Atoi(value) - if err != nil { - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) - } - for _, v := range enumValMap { - if v == int32(eVal) { - return reflect.ValueOf(eVal).Convert(t), nil - } - } - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) -} - -func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { - cval, err := convertEnum(value, f.Type(), enumValMap) - if err != nil { - return err - } - f.Set(cval) - return nil -} - -func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { - elemType := f.Type().Elem() - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result, err := convertEnum(v, elemType, enumValMap) - if err != nil { - return err - } - f.Index(i).Set(result) - } - return nil -} - -var ( - convFromType = map[reflect.Kind]reflect.Value{ - reflect.String: reflect.ValueOf(String), - reflect.Bool: reflect.ValueOf(Bool), - reflect.Float64: reflect.ValueOf(Float64), - reflect.Float32: reflect.ValueOf(Float32), - reflect.Int64: reflect.ValueOf(Int64), - reflect.Int32: reflect.ValueOf(Int32), - reflect.Uint64: reflect.ValueOf(Uint64), - reflect.Uint32: reflect.ValueOf(Uint32), - reflect.Slice: reflect.ValueOf(Bytes), - } -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel deleted file mode 100644 index 7109d79323187..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "pattern.go", - "readerfactory.go", - "trie.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", -) - -go_test( - name = "go_default_test", - size = "small", - srcs = ["trie_test.go"], - embed = [":go_default_library"], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go deleted file mode 100644 index cf79a4d588607..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package utilities provides members for internal use in grpc-gateway. 
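populateField above gives the well-known wrapper, time, and mask types their natural query-string spellings, and convertEnum accepts either an enum value's name or its number. A few examples of accepted values, with hypothetical field names. (Note in passing that the Int*Value and UInt*Value branches reuse the "bad DoubleValue" error string, an upstream copy-paste quirk.)

// ?created=2021-06-01T12:00:00Z -> google.protobuf.Timestamp (time.RFC3339Nano)
// ?ttl=1.5s                     -> google.protobuf.Duration (time.ParseDuration syntax)
// ?mask=name,address.city       -> google.protobuf.FieldMask (comma-joined paths)
// ?count=42                     -> google.protobuf.Int32Value / Int64Value wrappers
// ?color=RED or ?color=1        -> enum fields accept the value name or its number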
-package utilities diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go deleted file mode 100644 index dfe7de4864ab1..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go +++ /dev/null @@ -1,22 +0,0 @@ -package utilities - -// An OpCode is a opcode of compiled path patterns. -type OpCode int - -// These constants are the valid values of OpCode. -const ( - // OpNop does nothing - OpNop = OpCode(iota) - // OpPush pushes a component to stack - OpPush - // OpLitPush pushes a component to stack if it matches to the literal - OpLitPush - // OpPushM concatenates the remaining components and pushes it to stack - OpPushM - // OpConcatN pops N items from stack, concatenates them and pushes it back to stack - OpConcatN - // OpCapture pops an item and binds it to the variable - OpCapture - // OpEnd is the least positive invalid opcode. - OpEnd -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go deleted file mode 100644 index 6dd3854665f19..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go +++ /dev/null @@ -1,20 +0,0 @@ -package utilities - -import ( - "bytes" - "io" - "io/ioutil" -) - -// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins -// at the start of the stream -func IOReaderFactory(r io.Reader) (func() io.Reader, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - return func() io.Reader { - return bytes.NewReader(b) - }, nil -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go deleted file mode 100644 index c2b7b30dd9177..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go +++ /dev/null @@ -1,177 +0,0 @@ -package utilities - -import ( - "sort" -) - -// DoubleArray is a Double Array implementation of trie on sequences of strings. -type DoubleArray struct { - // Encoding keeps an encoding from string to int - Encoding map[string]int - // Base is the base array of Double Array - Base []int - // Check is the check array of Double Array - Check []int -} - -// NewDoubleArray builds a DoubleArray from a set of sequences of strings. 
-func NewDoubleArray(seqs [][]string) *DoubleArray { - da := &DoubleArray{Encoding: make(map[string]int)} - if len(seqs) == 0 { - return da - } - - encoded := registerTokens(da, seqs) - sort.Sort(byLex(encoded)) - - root := node{row: -1, col: -1, left: 0, right: len(encoded)} - addSeqs(da, encoded, 0, root) - - for i := len(da.Base); i > 0; i-- { - if da.Check[i-1] != 0 { - da.Base = da.Base[:i] - da.Check = da.Check[:i] - break - } - } - return da -} - -func registerTokens(da *DoubleArray, seqs [][]string) [][]int { - var result [][]int - for _, seq := range seqs { - var encoded []int - for _, token := range seq { - if _, ok := da.Encoding[token]; !ok { - da.Encoding[token] = len(da.Encoding) - } - encoded = append(encoded, da.Encoding[token]) - } - result = append(result, encoded) - } - for i := range result { - result[i] = append(result[i], len(da.Encoding)) - } - return result -} - -type node struct { - row, col int - left, right int -} - -func (n node) value(seqs [][]int) int { - return seqs[n.row][n.col] -} - -func (n node) children(seqs [][]int) []*node { - var result []*node - lastVal := int(-1) - last := new(node) - for i := n.left; i < n.right; i++ { - if lastVal == seqs[i][n.col+1] { - continue - } - last.right = i - last = &node{ - row: i, - col: n.col + 1, - left: i, - } - result = append(result, last) - } - last.right = n.right - return result -} - -func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { - ensureSize(da, pos) - - children := n.children(seqs) - var i int - for i = 1; ; i++ { - ok := func() bool { - for _, child := range children { - code := child.value(seqs) - j := i + code - ensureSize(da, j) - if da.Check[j] != 0 { - return false - } - } - return true - }() - if ok { - break - } - } - da.Base[pos] = i - for _, child := range children { - code := child.value(seqs) - j := i + code - da.Check[j] = pos + 1 - } - terminator := len(da.Encoding) - for _, child := range children { - code := child.value(seqs) - if code == terminator { - continue - } - j := i + code - addSeqs(da, seqs, j, *child) - } -} - -func ensureSize(da *DoubleArray, i int) { - for i >= len(da.Base) { - da.Base = append(da.Base, make([]int, len(da.Base)+1)...) - da.Check = append(da.Check, make([]int, len(da.Check)+1)...) - } -} - -type byLex [][]int - -func (l byLex) Len() int { return len(l) } -func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l byLex) Less(i, j int) bool { - si := l[i] - sj := l[j] - var k int - for k = 0; k < len(si) && k < len(sj); k++ { - if si[k] < sj[k] { - return true - } - if si[k] > sj[k] { - return false - } - } - if k < len(sj) { - return true - } - return false -} - -// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. 
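The double-array trie here is what lets the query parser cheaply skip keys already bound from the URL template: Parse calls HasCommonPrefix on each dotted field path. A minimal sketch:

// Field paths already consumed by the path template: "user.id" and "name".
filter := utilities.NewDoubleArray([][]string{{"user", "id"}, {"name"}})

filter.HasCommonPrefix([]string{"user", "id"})        // true: an exact match counts as a prefix
filter.HasCommonPrefix([]string{"user", "id", "sub"}) // true: "user.id" prefixes it
filter.HasCommonPrefix([]string{"user"})              // false: no registered path prefixes "user"
filter.HasCommonPrefix([]string{"email"})             // false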
-func (da *DoubleArray) HasCommonPrefix(seq []string) bool { - if len(da.Base) == 0 { - return false - } - - var i int - for _, t := range seq { - code, ok := da.Encoding[t] - if !ok { - break - } - j := da.Base[i] + code - if len(da.Check) <= j || da.Check[j] != i+1 { - break - } - i = j - } - j := da.Base[i] + len(da.Encoding) - if len(da.Check) <= j || da.Check[j] != i+1 { - return false - } - return true -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel new file mode 100644 index 0000000000000..d71991e6e8dbc --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") + +package(default_visibility = ["//visibility:public"]) + +filegroup( + name = "options_proto_files", + srcs = [ + "annotations.proto", + "openapiv2.proto", + ], +) + +go_library( + name = "options", + embed = [":options_go_proto"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options", +) + +proto_library( + name = "options_proto", + srcs = [ + "annotations.proto", + "openapiv2.proto", + ], + deps = [ + "@com_google_protobuf//:descriptor_proto", + "@com_google_protobuf//:struct_proto", + ], +) + +go_proto_library( + name = "options_go_proto", + compilers = ["//:go_apiv2"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options", + proto = ":options_proto", +) + +alias( + name = "go_default_library", + actual = ":options", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go new file mode 100644 index 0000000000000..7064cd96aed44 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.1 +// protoc (unknown) +// source: protoc-gen-openapiv2/options/annotations.proto + +package options + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Swagger)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger", + Tag: "bytes,1042,opt,name=openapiv2_swagger", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation", + Tag: "bytes,1042,opt,name=openapiv2_operation", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema", + Tag: "bytes,1042,opt,name=openapiv2_schema", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*Tag)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag", + Tag: "bytes,1042,opt,name=openapiv2_tag", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*JSONSchema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field", + Tag: "bytes,1042,opt,name=openapiv2_field", + Filename: "protoc-gen-openapiv2/options/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042; + E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042; + E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042; + E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042; + E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + // + // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042; + E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4] +) + +var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x70, 0x74, 
0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x5f, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, + 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, + 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.ServiceOptions)(nil), // 3: google.protobuf.ServiceOptions + (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions + (*Swagger)(nil), // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Schema)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*Tag)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*JSONSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema +} +var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{ + 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions + 1, // 1: 
grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions + 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions + 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions + 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions + 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger + 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation + 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 5, // [5:10] is the sub-list for extension type_name + 0, // [0:5] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() } +func file_protoc_gen_openapiv2_options_annotations_proto_init() { + if File_protoc_gen_openapiv2_options_annotations_proto != nil { + return + } + file_protoc_gen_openapiv2_options_openapiv2_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 5, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs, + ExtensionInfos: file_protoc_gen_openapiv2_options_annotations_proto_extTypes, + }.Build() + File_protoc_gen_openapiv2_options_annotations_proto = out.File + file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil + file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto new file mode 100644 index 0000000000000..d63d3c87eb310 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package grpc.gateway.protoc_gen_openapiv2.options; + +import "google/protobuf/descriptor.proto"; +import "protoc-gen-openapiv2/options/openapiv2.proto"; + +option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"; + +extend google.protobuf.FileOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. 
+ Swagger openapiv2_swagger = 1042; +} +extend google.protobuf.MethodOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Operation openapiv2_operation = 1042; +} +extend google.protobuf.MessageOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Schema openapiv2_schema = 1042; +} +extend google.protobuf.ServiceOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Tag openapiv2_tag = 1042; +} +extend google.protobuf.FieldOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + JSONSchema openapiv2_field = 1042; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json new file mode 100644 index 0000000000000..8efd122ca6235 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.swagger.json @@ -0,0 +1,44 @@ +{ + "swagger": "2.0", + "info": { + "title": "protoc-gen-openapiv2/options/annotations.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "rpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + } + } + } + } + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go new file mode 100644 index 0000000000000..97b3240bc1a02 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go @@ -0,0 +1,2958 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.1 +// protoc (unknown) +// source: protoc-gen-openapiv2/options/openapiv2.proto + +package options + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Scheme describes the schemes supported by the OpenAPI Swagger +// and Operation objects. 
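A consumer of these annotations (for example, a protoc plugin inspecting method descriptors) reads them back through the protobuf-go extension API. A minimal sketch, assuming a *descriptorpb.MethodOptions in hand:

import (
	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// operationAnnotation returns the openapiv2_operation option, if set.
func operationAnnotation(opts *descriptorpb.MethodOptions) (*options.Operation, bool) {
	if opts == nil || !proto.HasExtension(opts, options.E_Openapiv2Operation) {
		return nil, false
	}
	return proto.GetExtension(opts, options.E_Openapiv2Operation).(*options.Operation), true
}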
+type Scheme int32 + +const ( + Scheme_UNKNOWN Scheme = 0 + Scheme_HTTP Scheme = 1 + Scheme_HTTPS Scheme = 2 + Scheme_WS Scheme = 3 + Scheme_WSS Scheme = 4 +) + +// Enum value maps for Scheme. +var ( + Scheme_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HTTP", + 2: "HTTPS", + 3: "WS", + 4: "WSS", + } + Scheme_value = map[string]int32{ + "UNKNOWN": 0, + "HTTP": 1, + "HTTPS": 2, + "WS": 3, + "WSS": 4, + } +) + +func (x Scheme) Enum() *Scheme { + p := new(Scheme) + *p = x + return p +} + +func (x Scheme) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Scheme) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor() +} + +func (Scheme) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0] +} + +func (x Scheme) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Scheme.Descriptor instead. +func (Scheme) EnumDescriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{0} +} + +// `Type` is a supported HTTP header type. +// See https://swagger.io/specification/v2/#parameterType. +type HeaderParameter_Type int32 + +const ( + HeaderParameter_UNKNOWN HeaderParameter_Type = 0 + HeaderParameter_STRING HeaderParameter_Type = 1 + HeaderParameter_NUMBER HeaderParameter_Type = 2 + HeaderParameter_INTEGER HeaderParameter_Type = 3 + HeaderParameter_BOOLEAN HeaderParameter_Type = 4 +) + +// Enum value maps for HeaderParameter_Type. +var ( + HeaderParameter_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STRING", + 2: "NUMBER", + 3: "INTEGER", + 4: "BOOLEAN", + } + HeaderParameter_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STRING": 1, + "NUMBER": 2, + "INTEGER": 3, + "BOOLEAN": 4, + } +) + +func (x HeaderParameter_Type) Enum() *HeaderParameter_Type { + p := new(HeaderParameter_Type) + *p = x + return p +} + +func (x HeaderParameter_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor() +} + +func (HeaderParameter_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1] +} + +func (x HeaderParameter_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HeaderParameter_Type.Descriptor instead. +func (HeaderParameter_Type) EnumDescriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{3, 0} +} + +type JSONSchema_JSONSchemaSimpleTypes int32 + +const ( + JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0 + JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1 + JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2 + JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3 + JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4 + JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5 + JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6 + JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7 +) + +// Enum value maps for JSONSchema_JSONSchemaSimpleTypes. 
+var ( + JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARRAY", + 2: "BOOLEAN", + 3: "INTEGER", + 4: "NULL", + 5: "NUMBER", + 6: "OBJECT", + 7: "STRING", + } + JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{ + "UNKNOWN": 0, + "ARRAY": 1, + "BOOLEAN": 2, + "INTEGER": 3, + "NULL": 4, + "NUMBER": 5, + "OBJECT": 6, + "STRING": 7, + } +) + +func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes { + p := new(JSONSchema_JSONSchemaSimpleTypes) + *p = x + return p +} + +func (x JSONSchema_JSONSchemaSimpleTypes) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor() +} + +func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2] +} + +func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use JSONSchema_JSONSchemaSimpleTypes.Descriptor instead. +func (JSONSchema_JSONSchemaSimpleTypes) EnumDescriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{11, 0} +} + +// The type of the security scheme. Valid values are "basic", +// "apiKey" or "oauth2". +type SecurityScheme_Type int32 + +const ( + SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0 + SecurityScheme_TYPE_BASIC SecurityScheme_Type = 1 + SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2 + SecurityScheme_TYPE_OAUTH2 SecurityScheme_Type = 3 +) + +// Enum value maps for SecurityScheme_Type. +var ( + SecurityScheme_Type_name = map[int32]string{ + 0: "TYPE_INVALID", + 1: "TYPE_BASIC", + 2: "TYPE_API_KEY", + 3: "TYPE_OAUTH2", + } + SecurityScheme_Type_value = map[string]int32{ + "TYPE_INVALID": 0, + "TYPE_BASIC": 1, + "TYPE_API_KEY": 2, + "TYPE_OAUTH2": 3, + } +) + +func (x SecurityScheme_Type) Enum() *SecurityScheme_Type { + p := new(SecurityScheme_Type) + *p = x + return p +} + +func (x SecurityScheme_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor() +} + +func (SecurityScheme_Type) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3] +} + +func (x SecurityScheme_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SecurityScheme_Type.Descriptor instead. +func (SecurityScheme_Type) EnumDescriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{14, 0} +} + +// The location of the API key. Valid values are "query" or "header". +type SecurityScheme_In int32 + +const ( + SecurityScheme_IN_INVALID SecurityScheme_In = 0 + SecurityScheme_IN_QUERY SecurityScheme_In = 1 + SecurityScheme_IN_HEADER SecurityScheme_In = 2 +) + +// Enum value maps for SecurityScheme_In. 
+var ( + SecurityScheme_In_name = map[int32]string{ + 0: "IN_INVALID", + 1: "IN_QUERY", + 2: "IN_HEADER", + } + SecurityScheme_In_value = map[string]int32{ + "IN_INVALID": 0, + "IN_QUERY": 1, + "IN_HEADER": 2, + } +) + +func (x SecurityScheme_In) Enum() *SecurityScheme_In { + p := new(SecurityScheme_In) + *p = x + return p +} + +func (x SecurityScheme_In) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor() +} + +func (SecurityScheme_In) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4] +} + +func (x SecurityScheme_In) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SecurityScheme_In.Descriptor instead. +func (SecurityScheme_In) EnumDescriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{14, 1} +} + +// The flow used by the OAuth2 security scheme. Valid values are +// "implicit", "password", "application" or "accessCode". +type SecurityScheme_Flow int32 + +const ( + SecurityScheme_FLOW_INVALID SecurityScheme_Flow = 0 + SecurityScheme_FLOW_IMPLICIT SecurityScheme_Flow = 1 + SecurityScheme_FLOW_PASSWORD SecurityScheme_Flow = 2 + SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3 + SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4 +) + +// Enum value maps for SecurityScheme_Flow. +var ( + SecurityScheme_Flow_name = map[int32]string{ + 0: "FLOW_INVALID", + 1: "FLOW_IMPLICIT", + 2: "FLOW_PASSWORD", + 3: "FLOW_APPLICATION", + 4: "FLOW_ACCESS_CODE", + } + SecurityScheme_Flow_value = map[string]int32{ + "FLOW_INVALID": 0, + "FLOW_IMPLICIT": 1, + "FLOW_PASSWORD": 2, + "FLOW_APPLICATION": 3, + "FLOW_ACCESS_CODE": 4, + } +) + +func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow { + p := new(SecurityScheme_Flow) + *p = x + return p +} + +func (x SecurityScheme_Flow) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor { + return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor() +} + +func (SecurityScheme_Flow) Type() protoreflect.EnumType { + return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5] +} + +func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SecurityScheme_Flow.Descriptor instead. +func (SecurityScheme_Flow) EnumDescriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{14, 2} +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// schemes: HTTPS; +// consumes: "application/json"; +// produces: "application/json"; +// }; +type Swagger struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme nor sub-paths. It MAY include a port. If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If the schemes is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"` + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. 
+ Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Security scheme definitions that can be used across the specification. + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"` + // Additional external documentation. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Swagger) Reset() { + *x = Swagger{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Swagger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Swagger) ProtoMessage() {} + +func (x *Swagger) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Swagger.ProtoReflect.Descriptor instead. 
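One property of the generated accessors that follow is worth noting: every getter guards the receiver with an "if x != nil" check, so chained reads on possibly-absent annotation messages never panic. A two-line sketch:

var s *options.Swagger     // annotation absent
_ = s.GetInfo().GetTitle() // safe: each getter tolerates a nil receiver and returns ""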
+func (*Swagger) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{0} +} + +func (x *Swagger) GetSwagger() string { + if x != nil { + return x.Swagger + } + return "" +} + +func (x *Swagger) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} + +func (x *Swagger) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *Swagger) GetBasePath() string { + if x != nil { + return x.BasePath + } + return "" +} + +func (x *Swagger) GetSchemes() []Scheme { + if x != nil { + return x.Schemes + } + return nil +} + +func (x *Swagger) GetConsumes() []string { + if x != nil { + return x.Consumes + } + return nil +} + +func (x *Swagger) GetProduces() []string { + if x != nil { + return x.Produces + } + return nil +} + +func (x *Swagger) GetResponses() map[string]*Response { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions { + if x != nil { + return x.SecurityDefinitions + } + return nil +} + +func (x *Swagger) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Swagger) GetTags() []*Tag { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Swagger) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Swagger) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// Example: +// +// service EchoService { +// rpc Echo(SimpleMessage) returns (SimpleMessage) { +// option (google.api.http) = { +// get: "/v1/example/echo/{id}" +// }; +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +// summary: "Get a message."; +// operation_id: "getMessage"; +// tags: "echo"; +// responses: { +// key: "200" +// value: { +// description: "OK"; +// } +// } +// }; +// } +// } +type Operation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Additional external documentation for this operation. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. 
+ OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+ // A list of MIME types the operation can consume. This overrides the consumes
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+ // A list of MIME types the operation can produce. This overrides the produces
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+ // The list of possible responses as they are returned from executing this
+ // operation.
+ Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The transfer protocol for the operation. Values MUST be from the list:
+ // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+ // schemes definition.
+ Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+ // Declares this operation to be deprecated. Usage of the declared operation
+ // should be avoided. Default value is false.
+ Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+ // A declaration of which security schemes are applied for this operation. The
+ // list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements). This
+ // definition overrides any declared top-level security. To remove a top-level
+ // security declaration, an empty array can be used.
+ Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Custom parameters such as HTTP request headers.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+ // and https://swagger.io/specification/v2/#parameter-object.
+ Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"`
+}
+
+func (x *Operation) Reset() {
+ *x = Operation{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Operation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Operation.ProtoReflect.Descriptor instead. 
+func (*Operation) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{1} +} + +func (x *Operation) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Operation) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Operation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Operation) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Operation) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +func (x *Operation) GetConsumes() []string { + if x != nil { + return x.Consumes + } + return nil +} + +func (x *Operation) GetProduces() []string { + if x != nil { + return x.Produces + } + return nil +} + +func (x *Operation) GetResponses() map[string]*Response { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Operation) GetSchemes() []Scheme { + if x != nil { + return x.Schemes + } + return nil +} + +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Operation) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Operation) GetParameters() *Parameters { + if x != nil { + return x.Parameters + } + return nil +} + +// `Parameters` is a representation of OpenAPI v2 specification's parameters object. +// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only +// allow header parameters to be set here since we do not want users specifying custom non-header +// parameters beyond those inferred from the Protobuf schema. +// See: https://swagger.io/specification/v2/#parameter-object +type Parameters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // `Headers` is one or more HTTP header parameter. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + Headers []*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *Parameters) Reset() { + *x = Parameters{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Parameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Parameters) ProtoMessage() {} + +func (x *Parameters) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Parameters.ProtoReflect.Descriptor instead. +func (*Parameters) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{2} +} + +func (x *Parameters) GetHeaders() []*HeaderParameter { + if x != nil { + return x.Headers + } + return nil +} + +// `HeaderParameter` a HTTP header parameter. 
+// See: https://swagger.io/specification/v2/#parameter-object
+type HeaderParameter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // `Name` is the header name.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // `Description` is a short description of the header.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ // See: https://swagger.io/specification/v2/#parameterType.
+ Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"`
+ // `Format` The extending format for the previously mentioned type.
+ Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"`
+ // `Required` indicates whether the header is required.
+ Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"`
+}
+
+func (x *HeaderParameter) Reset() {
+ *x = HeaderParameter{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HeaderParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameter) ProtoMessage() {}
+
+func (x *HeaderParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderParameter.ProtoReflect.Descriptor instead.
+func (*HeaderParameter) Descriptor() ([]byte, []int) {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *HeaderParameter) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetType() HeaderParameter_Type {
+ if x != nil {
+ return x.Type
+ }
+ return HeaderParameter_UNKNOWN
+}
+
+func (x *HeaderParameter) GetFormat() string {
+ if x != nil {
+ return x.Format
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetRequired() bool {
+ if x != nil {
+ return x.Required
+ }
+ return false
+}
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+type Header struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // `Description` is a short description of the header.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ // `Format` The extending format for the previously mentioned type.
+ Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"`
+ // `Default` Declares the value of the header that the server will use if none is provided. 
+ // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"` + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` +} + +func (x *Header) Reset() { + *x = Header{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header.ProtoReflect.Descriptor instead. +func (*Header) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{4} +} + +func (x *Header) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Header) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Header) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *Header) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *Header) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +// `Response` is a representation of OpenAPI v2 specification's Response object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject +type Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + // `Headers` A list of headers that are sent with the response. + // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Response) Reset() { + *x = Response{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{5} +} + +func (x *Response) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Response) GetSchema() *Schema { + if x != nil { + return x.Schema + } + return nil +} + +func (x *Response) GetHeaders() map[string]*Header { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Response) GetExamples() map[string]string { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Response) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +// `Info` is a representation of OpenAPI v2 specification's Info object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// ... +// }; +type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The title of the application. + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + // A short description of the application. GFM syntax can be used for rich + // text representation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The Terms of Service for the API. + TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + // The contact information for the exposed API. + Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"` + // The license information for the exposed API. + License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"` + // Provides the version of the application API (not to be confused + // with the specification version). 
+ Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Info) Reset() { + *x = Info{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Info.ProtoReflect.Descriptor instead. +func (*Info) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{6} +} + +func (x *Info) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Info) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.TermsOfService + } + return "" +} + +func (x *Info) GetContact() *Contact { + if x != nil { + return x.Contact + } + return nil +} + +func (x *Info) GetLicense() *License { + if x != nil { + return x.License + } + return nil +} + +func (x *Info) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Info) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +// `Contact` is a representation of OpenAPI v2 specification's Contact object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// ... +// }; +// ... +// }; +type Contact struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identifying name of the contact person/organization. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The URL pointing to the contact information. MUST be in the format of a + // URL. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + // The email address of the contact person/organization. MUST be in the format + // of an email address. 
+ Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` +} + +func (x *Contact) Reset() { + *x = Contact{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Contact.ProtoReflect.Descriptor instead. +func (*Contact) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{7} +} + +func (x *Contact) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Contact) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Contact) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +// `License` is a representation of OpenAPI v2 specification's License object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// ... +// }; +// ... +// }; +type License struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The license name used for the API. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL to the license used for the API. MUST be in the format of a URL. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *License) Reset() { + *x = License{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use License.ProtoReflect.Descriptor instead. +func (*License) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{8} +} + +func (x *License) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *License) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// ... +// external_docs: { +// description: "More about gRPC-Gateway"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// } +// ... 
+// }; +type ExternalDocumentation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL for the target documentation. Value MUST be in the format + // of a URL. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *ExternalDocumentation) Reset() { + *x = ExternalDocumentation{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalDocumentation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalDocumentation) ProtoMessage() {} + +func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExternalDocumentation.ProtoReflect.Descriptor instead. +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{9} +} + +func (x *ExternalDocumentation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ExternalDocumentation) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"` + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // Additional external documentation for this schema. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. 
+ Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"` +} + +func (x *Schema) Reset() { + *x = Schema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. +func (*Schema) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{10} +} + +func (x *Schema) GetJsonSchema() *JSONSchema { + if x != nil { + return x.JsonSchema + } + return nil +} + +func (x *Schema) GetDiscriminator() string { + if x != nil { + return x.Discriminator + } + return "" +} + +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *Schema) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Schema) GetExample() string { + if x != nil { + return x.Example + } + return "" +} + +// `JSONSchema` represents properties from JSON Schema taken, and as used, in +// the OpenAPI v2 spec. +// +// This includes changes made by OpenAPI v2. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// See also: https://cswr.github.io/JsonSchema/spec/basic_types/, +// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json +// +// Example: +// +// message SimpleMessage { +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = { +// json_schema: { +// title: "SimpleMessage" +// description: "A simple message." +// required: ["id"] +// } +// }; +// +// // Id represents the message identifier. +// string id = 1; [ +// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { +// description: "The unique identifier of the simple message." +// }]; +// } +type JSONSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // + // `ref: ".google.protobuf.Timestamp"`. + Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + // The title of the schema. + Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"` + // A short description of the schema. + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + Default string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"` + ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // A free-form property to include a JSON example of this field. This is copied + // verbatim to the output swagger.json. Quotes must be escaped. 
+ // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+ Example string `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+ MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+ // Maximum represents an inclusive upper limit for a numeric instance. The
+ // value MUST be a number.
+ Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+ // Minimum represents an inclusive lower limit for a numeric instance. The
+ // value MUST be a number.
+ Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+ MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+ Required []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+ // Items in 'array' must be unique.
+ Array []string `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+ Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+ // `Format`
+ Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"`
+ // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+ Enum []string `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"`
+ // Additional field level properties used when generating the OpenAPI v2 file.
+ FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification. 
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *JSONSchema) Reset() { + *x = JSONSchema{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema) ProtoMessage() {} + +func (x *JSONSchema) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JSONSchema.ProtoReflect.Descriptor instead. +func (*JSONSchema) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{11} +} + +func (x *JSONSchema) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} + +func (x *JSONSchema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *JSONSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *JSONSchema) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *JSONSchema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *JSONSchema) GetExample() string { + if x != nil { + return x.Example + } + return "" +} + +func (x *JSONSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *JSONSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *JSONSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *JSONSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *JSONSchema) GetMaxLength() uint64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *JSONSchema) GetMinLength() uint64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *JSONSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *JSONSchema) GetMaxItems() uint64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *JSONSchema) GetMinItems() uint64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *JSONSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *JSONSchema) GetMaxProperties() uint64 { + if x != nil { + return x.MaxProperties + } + return 0 +} + +func (x *JSONSchema) GetMinProperties() uint64 { + if x != nil { + return x.MinProperties + } + return 0 +} + +func (x *JSONSchema) GetRequired() []string { + if x != nil { + return x.Required + } + return nil +} + +func (x *JSONSchema) GetArray() []string { + if x != nil { + return x.Array + } + return nil +} + +func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes { + if x != nil { + return x.Type + } + return nil +} + +func (x *JSONSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x 
*JSONSchema) GetEnum() []string { + if x != nil { + return x.Enum + } + return nil +} + +func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration { + if x != nil { + return x.FieldConfiguration + } + return nil +} + +func (x *JSONSchema) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +// `Tag` is a representation of OpenAPI v2 specification's Tag object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject +type Tag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the tag. Use it to allow override of the name of a + // global Tag object, then use that name to reference the tag throughout the + // OpenAPI file. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A short description for the tag. GFM syntax can be used for rich text + // representation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Additional external documentation for this tag. + ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Tag) Reset() { + *x = Tag{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tag.ProtoReflect.Descriptor instead. +func (*Tag) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{12} +} + +func (x *Tag) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Tag) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Tag) GetExternalDocs() *ExternalDocumentation { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Tag) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +// `SecurityDefinitions` is a representation of OpenAPI v2 specification's +// Security Definitions object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject +// +// A declaration of the security schemes available to be used in the +// specification. This does not enforce the security schemes on the operations +// and only serves to provide the relevant details for each scheme. 
+type SecurityDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A single security scheme definition, mapping a "name" to the scheme it + // defines. + Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *SecurityDefinitions) Reset() { + *x = SecurityDefinitions{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitions) ProtoMessage() {} + +func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityDefinitions.ProtoReflect.Descriptor instead. +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{13} +} + +func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme { + if x != nil { + return x.Security + } + return nil +} + +// `SecurityScheme` is a representation of OpenAPI v2 specification's +// Security Scheme object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject +// +// Allows the definition of a security scheme that can be used by the +// operations. Supported schemes are basic authentication, an API key (either as +// a header or as a query parameter) and OAuth2's common flows (implicit, +// password, application and access code). +type SecurityScheme struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"` + // A short description for security scheme. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The name of the header or query parameter to be used. + // Valid for apiKey. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"` + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"` + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. + AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + // The token URL to be used for this flow. 
This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"` + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *SecurityScheme) Reset() { + *x = SecurityScheme{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityScheme) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityScheme) ProtoMessage() {} + +func (x *SecurityScheme) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityScheme.ProtoReflect.Descriptor instead. +func (*SecurityScheme) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{14} +} + +func (x *SecurityScheme) GetType() SecurityScheme_Type { + if x != nil { + return x.Type + } + return SecurityScheme_TYPE_INVALID +} + +func (x *SecurityScheme) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *SecurityScheme) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SecurityScheme) GetIn() SecurityScheme_In { + if x != nil { + return x.In + } + return SecurityScheme_IN_INVALID +} + +func (x *SecurityScheme) GetFlow() SecurityScheme_Flow { + if x != nil { + return x.Flow + } + return SecurityScheme_FLOW_INVALID +} + +func (x *SecurityScheme) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl + } + return "" +} + +func (x *SecurityScheme) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *SecurityScheme) GetScopes() *Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value { + if x != nil { + return x.Extensions + } + return nil +} + +// `SecurityRequirement` is a representation of OpenAPI v2 specification's +// Security Requirement object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject +// +// Lists the required security schemes to execute this operation. The object can +// have multiple security schemes declared in it which are all required (that +// is, there is a logical AND between the schemes). +// +// The name used for each property MUST correspond to a security scheme +// declared in the Security Definitions. 
+type SecurityRequirement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Each name must correspond to a security scheme which is declared in + // the Security Definitions. If the security scheme is of type "oauth2", + // then the value is a list of scope names required for the execution. + // For other security scheme types, the array MUST be empty. + SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *SecurityRequirement) Reset() { + *x = SecurityRequirement{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityRequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement) ProtoMessage() {} + +func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead. +func (*SecurityRequirement) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{15} +} + +func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue { + if x != nil { + return x.SecurityRequirement + } + return nil +} + +// `Scopes` is a representation of OpenAPI v2 specification's Scopes object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject +// +// Lists the available scopes for an OAuth2 security scheme. +type Scopes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Maps between a name of a scope to a short description of it (as the value + // of the property). + Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Scopes) Reset() { + *x = Scopes{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Scopes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Scopes) ProtoMessage() {} + +func (x *Scopes) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Scopes.ProtoReflect.Descriptor instead. +func (*Scopes) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{16} +} + +func (x *Scopes) GetScope() map[string]string { + if x != nil { + return x.Scope + } + return nil +} + +// 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file. 
+// These properties are not defined by OpenAPIv2, but they are used to control the generation. +type JSONSchema_FieldConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Alternative parameter name when used as path parameter. If set, this will + // be used as the complete parameter name when this field is used as a path + // parameter. Use this to avoid having auto generated path parameter names + // for overlapping paths. + PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"` +} + +func (x *JSONSchema_FieldConfiguration) Reset() { + *x = JSONSchema_FieldConfiguration{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *JSONSchema_FieldConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JSONSchema_FieldConfiguration) ProtoMessage() {} + +func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JSONSchema_FieldConfiguration.ProtoReflect.Descriptor instead. +func (*JSONSchema_FieldConfiguration) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *JSONSchema_FieldConfiguration) GetPathParamName() string { + if x != nil { + return x.PathParamName + } + return "" +} + +// If the security scheme is of type "oauth2", then the value is a list of +// scope names required for the execution. For other security scheme types, +// the array MUST be empty. +type SecurityRequirement_SecurityRequirementValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"` +} + +func (x *SecurityRequirement_SecurityRequirementValue) Reset() { + *x = SecurityRequirement_SecurityRequirementValue{} + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityRequirement_SecurityRequirementValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {} + +func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message { + mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityRequirement_SecurityRequirementValue.ProtoReflect.Descriptor instead. 
+func (*SecurityRequirement_SecurityRequirementValue) Descriptor() ([]byte, []int) { + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP(), []int{15, 0} +} + +func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string { + if x != nil { + return x.Scope + } + return nil +} + +var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor + +var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67, + 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, + 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07, + 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, + 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a, + 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 
0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, + 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, + 0x6f, 0x70, 0x65, 
0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, + 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, + 0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, + 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, + 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, + 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, + 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 
0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, + 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, + 0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c, + 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 
0x75, + 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a, + 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x22, 0xd7, 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x72, 0x65, 0x66, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, + 0x6e, 0x6c, 0x79, 
0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, + 0x6e, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, + 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, + 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, + 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, + 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, + 0x72, 0x72, 0x61, 0x79, 0x18, 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, + 0x79, 0x12, 0x5f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, + 0x4b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, + 0x75, 0x6d, 0x18, 0x2e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, + 0x0a, 0x13, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, + 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x1a, 0x3c, 0x0a, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, + 0x41, 0x52, 0x52, 0x41, 0x59, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, + 0x41, 0x4e, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, + 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, + 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, + 0x54, 0x10, 0x06, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 
0x4e, 0x47, 0x10, 0x07, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, + 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, + 0x17, 0x10, 0x18, 0x4a, 0x04, 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, + 0x04, 0x08, 0x1d, 0x10, 0x1e, 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, + 0x2a, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, + 0x0a, 0x03, 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, + 0x63, 0x73, 0x12, 0x5e, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, + 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, + 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x68, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 
0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0xff, 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x4c, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, + 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, + 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, + 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, + 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, + 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 
0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, + 0x43, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, + 0x4b, 0x45, 0x59, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, + 0x55, 0x54, 0x48, 0x32, 0x10, 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, + 0x49, 0x4e, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, + 0x49, 0x4e, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, + 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, + 0x77, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, + 0x41, 0x53, 0x53, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, + 0x57, 0x5f, 0x41, 0x50, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, + 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, + 0x4f, 0x44, 0x45, 0x10, 0x04, 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, + 0x0a, 0x14, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, + 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 
0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, + 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, + 0x01, 0x0a, 0x06, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, + 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, + 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, + 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, + 0x53, 0x10, 0x02, 0x12, 0x06, 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, + 0x53, 0x53, 0x10, 0x04, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, + 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescOnce sync.Once + file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescData = file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc +) + +func file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescGZIP() []byte { + file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescOnce.Do(func() { + 
file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescData) + }) + return file_protoc_gen_openapiv2_options_openapiv2_proto_rawDescData +} + +var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{ + (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme + (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + (JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + (SecurityScheme_Type)(0), // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + (SecurityScheme_In)(0), // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + (SecurityScheme_Flow)(0), // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger + (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation + (*Parameters)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters + (*HeaderParameter)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + (*Header)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Header + (*Response)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.Response + (*Info)(nil), // 12: grpc.gateway.protoc_gen_openapiv2.options.Info + (*Contact)(nil), // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact + (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License + (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema + (*JSONSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + (*Tag)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.Tag + (*SecurityDefinitions)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + (*SecurityScheme)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + (*SecurityRequirement)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + (*Scopes)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.Scopes + nil, // 23: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + (*JSONSchema_FieldConfiguration)(nil), // 31: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + nil, // 33: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + nil, // 35: 
grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + (*SecurityRequirement_SecurityRequirementValue)(nil), // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + nil, // 38: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + (*structpb.Value)(nil), // 39: google.protobuf.Value +} +var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{ + 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info + 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 23, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry + 19, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions + 21, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 18, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag + 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 24, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry + 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 25, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry + 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme + 21, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement + 26, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry + 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters + 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter + 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type + 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema + 27, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry + 28, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry + 29, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry + 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> 
grpc.gateway.protoc_gen_openapiv2.options.Contact + 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License + 30, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry + 17, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema + 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 2, // 25: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes + 31, // 26: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration + 32, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry + 15, // 28: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation + 33, // 29: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry + 34, // 30: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry + 3, // 31: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type + 4, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In + 5, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow + 22, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes + 35, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry + 37, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry + 38, // 37: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry + 11, // 38: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 39, // 39: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value + 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response + 39, // 41: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value + 10, // 42: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header + 39, // 43: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value + 39, // 44: 
grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value + 39, // 45: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value + 39, // 46: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value + 20, // 47: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme + 39, // 48: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value + 36, // 49: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue + 50, // [50:50] is the sub-list for method output_type + 50, // [50:50] is the sub-list for method input_type + 50, // [50:50] is the sub-list for extension type_name + 50, // [50:50] is the sub-list for extension extendee + 0, // [0:50] is the sub-list for field type_name +} + +func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() } +func file_protoc_gen_openapiv2_options_openapiv2_proto_init() { + if File_protoc_gen_openapiv2_options_openapiv2_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc, + NumEnums: 6, + NumMessages: 33, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes, + DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs, + EnumInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes, + MessageInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes, + }.Build() + File_protoc_gen_openapiv2_options_openapiv2_proto = out.File + file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil + file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto new file mode 100644 index 0000000000000..88f9ffd008dd1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto @@ -0,0 +1,720 @@ +syntax = "proto3"; + +package grpc.gateway.protoc_gen_openapiv2.options; + +import "google/protobuf/struct.proto"; + +option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"; + +// Scheme describes the schemes supported by the OpenAPI Swagger +// and Operation objects. +enum Scheme { + UNKNOWN = 0; + HTTP = 1; + HTTPS = 2; + WS = 3; + WSS = 4; +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// schemes: HTTPS; +// consumes: "application/json"; +// produces: "application/json"; +// }; +// +message Swagger { + // Specifies the OpenAPI Specification version being used. It can be + // used by the OpenAPI UI and other clients to interpret the API listing. The + // value MUST be "2.0". + string swagger = 1; + // Provides metadata about the API. The metadata can be used by the + // clients if needed. + Info info = 2; + // The host (name or ip) serving the API. This MUST be the host only and does + // not include the scheme nor sub-paths. It MAY include a port. If the host is + // not included, the host serving the documentation is to be used (including + // the port). The host does not support path templating. + string host = 3; + // The base path on which the API is served, which is relative to the host. If + // it is not included, the API is served directly under the host. The value + // MUST start with a leading slash (/). The basePath does not support path + // templating. + // Note that using `base_path` does not change the endpoint paths that are + // generated in the resulting OpenAPI file. If you wish to use `base_path` + // with relatively generated OpenAPI paths, the `base_path` prefix must be + // manually removed from your `google.api.http` paths and your code changed to + // serve the API from the `base_path`. + string base_path = 4; + // The transfer protocol of the API. Values MUST be from the list: "http", + // "https", "ws", "wss". If the schemes is not included, the default scheme to + // be used is the one used to access the OpenAPI definition itself. + repeated Scheme schemes = 5; + // A list of MIME types the APIs can consume. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + repeated string consumes = 6; + // A list of MIME types the APIs can produce. This is global to all APIs but + // can be overridden on specific API calls. Value MUST be as described under + // Mime Types. + repeated string produces = 7; + // field 8 is reserved for 'paths'. + reserved 8; + // field 9 is reserved for 'definitions', which at this time are already + // exposed as and customizable as proto messages. + reserved 9; + // An object to hold responses that can be used across operations. This + // property does not define global responses for all operations. + map responses = 10; + // Security scheme definitions that can be used across the specification. + SecurityDefinitions security_definitions = 11; + // A declaration of which security schemes are applied for the API as a whole. + // The list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). + // Individual operations can override this definition. + repeated SecurityRequirement security = 12; + // A list of tags for API documentation control. 
Tags can be used for logical + grouping of operations by resources or any other qualifier. + repeated Tag tags = 13; + // Additional external documentation. + ExternalDocumentation external_docs = 14; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map<string, google.protobuf.Value> extensions = 15; +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// Example: +// +// service EchoService { +// rpc Echo(SimpleMessage) returns (SimpleMessage) { +// option (google.api.http) = { +// get: "/v1/example/echo/{id}" +// }; +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +// summary: "Get a message."; +// operation_id: "getMessage"; +// tags: "echo"; +// responses: { +// key: "200" +// value: { +// description: "OK"; +// } +// } +// }; +// } +// } +message Operation { + // A list of tags for API documentation control. Tags can be used for logical + // grouping of operations by resources or any other qualifier. + repeated string tags = 1; + // A short summary of what the operation does. For maximum readability in the + // swagger-ui, this field SHOULD be less than 120 characters. + string summary = 2; + // A verbose explanation of the operation behavior. GFM syntax can be used for + // rich text representation. + string description = 3; + // Additional external documentation for this operation. + ExternalDocumentation external_docs = 4; + // Unique string used to identify the operation. The id MUST be unique among + // all operations described in the API. Tools and libraries MAY use the + // operationId to uniquely identify an operation, therefore, it is recommended + // to follow common programming naming conventions. + string operation_id = 5; + // A list of MIME types the operation can consume. This overrides the consumes + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + repeated string consumes = 6; + // A list of MIME types the operation can produce. This overrides the produces + // definition at the OpenAPI Object. An empty value MAY be used to clear the + // global definition. Value MUST be as described under Mime Types. + repeated string produces = 7; + // field 8 is reserved for 'parameters'. + reserved 8; + // The list of possible responses as they are returned from executing this + // operation. + map<string, Response> responses = 9; + // The transfer protocol for the operation. Values MUST be from the list: + // "http", "https", "ws", "wss". The value overrides the OpenAPI Object + // schemes definition. + repeated Scheme schemes = 10; + // Declares this operation to be deprecated. Usage of the declared operation + // should be avoided. Default value is false. + bool deprecated = 11; + // A declaration of which security schemes are applied for this operation. The + // list of values describes alternative security schemes that can be used + // (that is, there is a logical OR between the security requirements). This + // definition overrides any declared top-level security. To remove a top-level + // security declaration, an empty array can be used.
+ repeated SecurityRequirement security = 12; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map<string, google.protobuf.Value> extensions = 13; + // Custom parameters such as HTTP request headers. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/ + // and https://swagger.io/specification/v2/#parameter-object. + Parameters parameters = 14; +} + +// `Parameters` is a representation of OpenAPI v2 specification's parameters object. +// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only +// allow header parameters to be set here since we do not want users specifying custom non-header +// parameters beyond those inferred from the Protobuf schema. +// See: https://swagger.io/specification/v2/#parameter-object +message Parameters { + // `Headers` is one or more HTTP header parameters. + // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters + repeated HeaderParameter headers = 1; +} + +// `HeaderParameter` is an HTTP header parameter. +// See: https://swagger.io/specification/v2/#parameter-object +message HeaderParameter { + // `Type` is a supported HTTP header type. + // See https://swagger.io/specification/v2/#parameterType. + enum Type { + UNKNOWN = 0; + STRING = 1; + NUMBER = 2; + INTEGER = 3; + BOOLEAN = 4; + } + + // `Name` is the header name. + string name = 1; + // `Description` is a short description of the header. + string description = 2; + // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + // See: https://swagger.io/specification/v2/#parameterType. + Type type = 3; + // `Format` is the extending format for the previously mentioned type. + string format = 4; + // `Required` indicates whether the header is required. + bool required = 5; + // field 6 is reserved for 'items', but in OpenAPI-specific way. + reserved 6; + // field 7 is reserved for 'collectionFormat', which determines the format of the array if type array is used. + reserved 7; +} + +// `Header` is a representation of OpenAPI v2 specification's Header object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject +// +message Header { + // `Description` is a short description of the header. + string description = 1; + // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + string type = 2; + // `Format` is the extending format for the previously mentioned type. + string format = 3; + // field 4 is reserved for 'items', but in OpenAPI-specific way. + reserved 4; + // field 5 is reserved for 'collectionFormat', which determines the format of the array if type array is used. + reserved 5; + // `Default` declares the value of the header that the server will use if none is provided. + // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2. + // Unlike JSON Schema this value MUST conform to the defined type for the header. + string default = 6; + // field 7 is reserved for 'maximum'. + reserved 7; + // field 8 is reserved for 'exclusiveMaximum'. + reserved 8; + // field 9 is reserved for 'minimum'. + reserved 9; + // field 10 is reserved for 'exclusiveMinimum'. + reserved 10; + // field 11 is reserved for 'maxLength'.
+ reserved 11; + // field 12 is reserved for 'minLength'. + reserved 12; + // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3. + string pattern = 13; + // field 14 is reserved for 'maxItems'. + reserved 14; + // field 15 is reserved for 'minItems'. + reserved 15; + // field 16 is reserved for 'uniqueItems'. + reserved 16; + // field 17 is reserved for 'enum'. + reserved 17; + // field 18 is reserved for 'multipleOf'. + reserved 18; +} + +// `Response` is a representation of OpenAPI v2 specification's Response object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject +// +message Response { + // `Description` is a short description of the response. + // GFM syntax can be used for rich text representation. + string description = 1; + // `Schema` optionally defines the structure of the response. + // If `Schema` is not provided, it means there is no content to the response. + Schema schema = 2; + // `Headers` A list of headers that are sent with the response. + // `Header` name is expected to be a string in the canonical format of the MIME header key + // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey + map headers = 3; + // `Examples` gives per-mimetype response examples. + // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object + map examples = 4; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map extensions = 5; +} + +// `Info` is a representation of OpenAPI v2 specification's Info object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// title: "Echo API"; +// version: "1.0"; +// description: ""; +// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// }; +// ... +// }; +// +message Info { + // The title of the application. + string title = 1; + // A short description of the application. GFM syntax can be used for rich + // text representation. + string description = 2; + // The Terms of Service for the API. + string terms_of_service = 3; + // The contact information for the exposed API. + Contact contact = 4; + // The license information for the exposed API. + License license = 5; + // Provides the version of the application API (not to be confused + // with the specification version). + string version = 6; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map extensions = 7; +} + +// `Contact` is a representation of OpenAPI v2 specification's Contact object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... 
+// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// ... +// }; +// ... +// }; +// +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. MUST be in the format of a + // URL. + string url = 2; + // The email address of the contact person/organization. MUST be in the format + // of an email address. + string email = 3; +} + +// `License` is a representation of OpenAPI v2 specification's License object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// ... +// }; +// ... +// }; +// +message License { + // The license name used for the API. + string name = 1; + // A URL to the license used for the API. MUST be in the format of a URL. + string url = 2; +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// ... +// external_docs: { +// description: "More about gRPC-Gateway"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// } +// ... +// }; +// +message ExternalDocumentation { + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + string description = 1; + // The URL for the target documentation. Value MUST be in the format + // of a URL. + string url = 2; +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +message Schema { + JSONSchema json_schema = 1; + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + string discriminator = 2; + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + bool read_only = 3; + // field 4 is reserved for 'xml'. + reserved 4; + // Additional external documentation for this schema. + ExternalDocumentation external_docs = 5; + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. + string example = 6; +} + +// `JSONSchema` represents properties from JSON Schema taken, and as used, in +// the OpenAPI v2 spec. +// +// This includes changes made by OpenAPI v2. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// See also: https://cswr.github.io/JsonSchema/spec/basic_types/, +// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json +// +// Example: +// +// message SimpleMessage { +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = { +// json_schema: { +// title: "SimpleMessage" +// description: "A simple message." +// required: ["id"] +// } +// }; +// +// // Id represents the message identifier. +// string id = 1; [ +// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { +// description: "The unique identifier of the simple message." +// }]; +// } +// +message JSONSchema { + // field 1 is reserved for '$id', omitted from OpenAPI v2. + reserved 1; + // field 2 is reserved for '$schema', omitted from OpenAPI v2. + reserved 2; + // Ref is used to define an external reference to include in the message. + // This could be a fully qualified proto message reference, and that type must + // be imported into the protofile. If no message is identified, the Ref will + // be used verbatim in the output. + // For example: + // `ref: ".google.protobuf.Timestamp"`. + string ref = 3; + // field 4 is reserved for '$comment', omitted from OpenAPI v2. + reserved 4; + // The title of the schema. + string title = 5; + // A short description of the schema. + string description = 6; + string default = 7; + bool read_only = 8; + // A free-form property to include a JSON example of this field. This is copied + // verbatim to the output swagger.json. Quotes must be escaped. + // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject + string example = 9; + double multiple_of = 10; + // Maximum represents an inclusive upper limit for a numeric instance. The + // value MUST be a number. + double maximum = 11; + bool exclusive_maximum = 12; + // Minimum represents an inclusive lower limit for a numeric instance. The + // value MUST be a number. + double minimum = 13; + bool exclusive_minimum = 14; + uint64 max_length = 15; + uint64 min_length = 16; + string pattern = 17; + // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2. + reserved 18; + // field 19 is reserved for 'items', but in OpenAPI-specific way. + // TODO(ivucica): add 'items'? + reserved 19; + uint64 max_items = 20; + uint64 min_items = 21; + bool unique_items = 22; + // field 23 is reserved for 'contains', omitted from OpenAPI v2. + reserved 23; + uint64 max_properties = 24; + uint64 min_properties = 25; + repeated string required = 26; + // field 27 is reserved for 'additionalProperties', but in OpenAPI-specific + // way. TODO(ivucica): add 'additionalProperties'? + reserved 27; + // field 28 is reserved for 'definitions', omitted from OpenAPI v2. + reserved 28; + // field 29 is reserved for 'properties', but in OpenAPI-specific way. + // TODO(ivucica): add 'properties'? + reserved 29; + // following fields are reserved, as the properties have been omitted from + // OpenAPI v2: + // patternProperties, dependencies, propertyNames, const + reserved 30 to 33; + // Items in 'array' must be unique.
+ repeated string array = 34; + + enum JSONSchemaSimpleTypes { + UNKNOWN = 0; + ARRAY = 1; + BOOLEAN = 2; + INTEGER = 3; + NULL = 4; + NUMBER = 5; + OBJECT = 6; + STRING = 7; + } + + repeated JSONSchemaSimpleTypes type = 35; + // `Format` + string format = 36; + // following fields are reserved, as the properties have been omitted from + // OpenAPI v2: contentMediaType, contentEncoding, if, then, else + reserved 37 to 41; + // field 42 is reserved for 'allOf', but in OpenAPI-specific way. + // TODO(ivucica): add 'allOf'? + reserved 42; + // following fields are reserved, as the properties have been omitted from + // OpenAPI v2: + // anyOf, oneOf, not + reserved 43 to 45; + // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1 + repeated string enum = 46; + + // Additional field level properties used when generating the OpenAPI v2 file. + FieldConfiguration field_configuration = 1001; + + // 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file. + // These properties are not defined by OpenAPIv2, but they are used to control the generation. + message FieldConfiguration { + // Alternative parameter name when used as path parameter. If set, this will + // be used as the complete parameter name when this field is used as a path + // parameter. Use this to avoid having auto generated path parameter names + // for overlapping paths. + string path_param_name = 47; + } + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map extensions = 48; +} + +// `Tag` is a representation of OpenAPI v2 specification's Tag object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject +// +message Tag { + // The name of the tag. Use it to allow override of the name of a + // global Tag object, then use that name to reference the tag throughout the + // OpenAPI file. + string name = 1; + // A short description for the tag. GFM syntax can be used for rich text + // representation. + string description = 2; + // Additional external documentation for this tag. + ExternalDocumentation external_docs = 3; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map extensions = 4; +} + +// `SecurityDefinitions` is a representation of OpenAPI v2 specification's +// Security Definitions object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject +// +// A declaration of the security schemes available to be used in the +// specification. This does not enforce the security schemes on the operations +// and only serves to provide the relevant details for each scheme. +message SecurityDefinitions { + // A single security scheme definition, mapping a "name" to the scheme it + // defines. + map security = 1; +} + +// `SecurityScheme` is a representation of OpenAPI v2 specification's +// Security Scheme object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject +// +// Allows the definition of a security scheme that can be used by the +// operations. 
Supported schemes are basic authentication, an API key (either as +// a header or as a query parameter) and OAuth2's common flows (implicit, +// password, application and access code). +message SecurityScheme { + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + enum Type { + TYPE_INVALID = 0; + TYPE_BASIC = 1; + TYPE_API_KEY = 2; + TYPE_OAUTH2 = 3; + } + + // The location of the API key. Valid values are "query" or "header". + enum In { + IN_INVALID = 0; + IN_QUERY = 1; + IN_HEADER = 2; + } + + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + enum Flow { + FLOW_INVALID = 0; + FLOW_IMPLICIT = 1; + FLOW_PASSWORD = 2; + FLOW_APPLICATION = 3; + FLOW_ACCESS_CODE = 4; + } + + // The type of the security scheme. Valid values are "basic", + // "apiKey" or "oauth2". + Type type = 1; + // A short description for security scheme. + string description = 2; + // The name of the header or query parameter to be used. + // Valid for apiKey. + string name = 3; + // The location of the API key. Valid values are "query" or + // "header". + // Valid for apiKey. + In in = 4; + // The flow used by the OAuth2 security scheme. Valid values are + // "implicit", "password", "application" or "accessCode". + // Valid for oauth2. + Flow flow = 5; + // The authorization URL to be used for this flow. This SHOULD be in + // the form of a URL. + // Valid for oauth2/implicit and oauth2/accessCode. + string authorization_url = 6; + // The token URL to be used for this flow. This SHOULD be in the + // form of a URL. + // Valid for oauth2/password, oauth2/application and oauth2/accessCode. + string token_url = 7; + // The available scopes for the OAuth2 security scheme. + // Valid for oauth2. + Scopes scopes = 8; + // Custom properties that start with "x-" such as "x-foo" used to describe + // extra functionality that is not covered by the standard OpenAPI Specification. + // See: https://swagger.io/docs/specification/2-0/swagger-extensions/ + map extensions = 9; +} + +// `SecurityRequirement` is a representation of OpenAPI v2 specification's +// Security Requirement object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject +// +// Lists the required security schemes to execute this operation. The object can +// have multiple security schemes declared in it which are all required (that +// is, there is a logical AND between the schemes). +// +// The name used for each property MUST correspond to a security scheme +// declared in the Security Definitions. +message SecurityRequirement { + // If the security scheme is of type "oauth2", then the value is a list of + // scope names required for the execution. For other security scheme types, + // the array MUST be empty. + message SecurityRequirementValue { + repeated string scope = 1; + } + // Each name must correspond to a security scheme which is declared in + // the Security Definitions. If the security scheme is of type "oauth2", + // then the value is a list of scope names required for the execution. + // For other security scheme types, the array MUST be empty. + map security_requirement = 1; +} + +// `Scopes` is a representation of OpenAPI v2 specification's Scopes object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject +// +// Lists the available scopes for an OAuth2 security scheme. 
+message Scopes { + // Maps between a name of a scope to a short description of it (as the value + // of the property). + map scope = 1; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json new file mode 100644 index 0000000000000..97dd6459ddbce --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.swagger.json @@ -0,0 +1,44 @@ +{ + "swagger": "2.0", + "info": { + "title": "protoc-gen-openapiv2/options/openapiv2.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "rpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + } + } + } + } + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index 78d7c9f5c883b..a65d88eb86581 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -73,7 +73,7 @@ go_test( "@org_golang_google_genproto_googleapis_api//httpbody", "@org_golang_google_genproto_googleapis_rpc//errdetails", "@org_golang_google_genproto_googleapis_rpc//status", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//health/grpc_health_v1", "@org_golang_google_grpc//metadata", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 5dd4e447862e5..2f2b342431d6f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -49,6 +49,7 @@ var malformedHTTPHeaders = map[string]struct{}{ type ( rpcMethodKey struct{} httpPathPatternKey struct{} + httpPatternKey struct{} AnnotateContextOption func(ctx context.Context) context.Context ) @@ -404,3 +405,13 @@ func HTTPPathPattern(ctx context.Context) (string, bool) { func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) } + +// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists. 
+func HTTPPattern(ctx context.Context) (Pattern, bool) { + v, ok := ctx.Value(httpPatternKey{}).(Pattern) + return v, ok +} + +func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context { + return context.WithValue(ctx, httpPatternKey{}, httpPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index d7b15fcfb3f85..2e50082ad1163 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,7 +94,7 @@ func Int64(val string) (int64, error) { } // Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. +// 'sep' into an int64 slice. func Int64Slice(val, sep string) ([]int64, error) { s := strings.Split(val, sep) values := make([]int64, len(s)) @@ -118,7 +118,7 @@ func Int32(val string) (int32, error) { } // Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. +// 'sep' into an int32 slice. func Int32Slice(val, sep string) ([]int32, error) { s := strings.Split(val, sep) values := make([]int32, len(s)) @@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) { } // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +// base64 without padding, are separated by 'sep' into a slice of byte slices. func BytesSlice(val, sep string) ([][]byte, error) { s := strings.Split(val, sep) values := make([][]byte, len(s)) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 5682998699ae4..41cd4f5030e4f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R mux.errorHandler(ctx, mux, marshaler, w, r, err) } +// HTTPStreamError uses the mux-configured stream error handler to notify the client of an error without closing the connection. +func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } +} + // DefaultHTTPErrorHandler is the default error handler. // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. // If "err" is an HTTPStatusError, the function replies with the status code provided by that struct.
This is @@ -93,6 +108,7 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { // return Internal when Marshal failed const fallback = `{"code": 13, "message": "failed to marshal error message"}` + const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}` var customStatus *HTTPStatusError if errors.As(err, &customStatus) { @@ -100,19 +116,28 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh } s := status.Convert(err) - pb := s.Proto() w.Header().Del("Trailer") w.Header().Del("Transfer-Encoding") - contentType := marshaler.ContentType(pb) + respRw, err := mux.forwardResponseRewriter(ctx, s.Proto()) + if err != nil { + grpclog.Errorf("Failed to rewrite error message %q: %v", s, err) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallbackRewriter); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + contentType := marshaler.ContentType(respRw) w.Header().Set("Content-Type", contentType) if s.Code() == codes.Unauthenticated { w.Header().Set("WWW-Authenticate", s.Message()) } - buf, merr := marshaler.Marshal(pb) + buf, merr := marshaler.Marshal(respRw) if merr != nil { grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) w.WriteHeader(http.StatusInternalServerError) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 9005d6a0bf46c..2fcd7af3c40ed 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string { return paths } -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask type fieldMaskPathItem struct { // the list of prior fields leading up to node connected by dots path string diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index de1eef1f4f858..9f50a569e9bab 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -3,6 +3,7 @@ package runtime import ( "context" "errors" + "fmt" "io" "net/http" "net/textproto" @@ -55,20 +56,27 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(resp)) + w.Header().Set("Content-Type", marshaler.ContentType(respRw)) } var buf []byte - httpBody, isHTTPBody := resp.(*httpbody.HttpBody) + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) switch { - case resp == nil: + case respRw == nil: buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) case isHTTPBody: buf = httpBody.GetData() default: - result := map[string]interface{}{"result": resp} - if rb, ok := resp.(responseBody); ok { + result := 
map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { result["result"] = rb.XXX_ResponseBody() } @@ -164,12 +172,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha HTTPError(ctx, mux, marshaler, w, req, err) return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { + if rb, ok := respRw.(responseBody); ok { buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) } else { - buf, err = marshaler.Marshal(resp) + buf, err = marshaler.Marshal(respRw) } if err != nil { grpclog.Errorf("Marshal error: %v", err) @@ -201,8 +214,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) - return err + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) } } return nil diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 0b051e6e894a3..07c28112c8993 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. // // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. +// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with an "application/json" Content-Type. // "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index ed9a7e4387dcf..60c2065ddcbb8 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -48,12 +48,19 @@ var encodedPathSplitter = regexp.MustCompile("(/|%2F)") // A HandlerFunc handles a specific pair of path pattern and HTTP method. type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) +// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation +// registration methods. It is generally recommended to use gRPC client or server interceptors instead +// where possible. +type Middleware func(HandlerFunc) HandlerFunc + // ServeMux is a request multiplexer for grpc-gateway. // It matches http requests to patterns and invokes the corresponding handler. type ServeMux struct { // handlers maps HTTP method to a list of handlers. 
handlers map[string][]handler + middlewares []Middleware forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + forwardResponseRewriter ForwardResponseRewriter marshalers marshalerRegistry incomingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc @@ -69,6 +76,24 @@ type ServeMux struct { // ServeMuxOption is an option that can be given to a ServeMux on construction. type ServeMuxOption func(*ServeMux) +// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages +// before they are forwarded in a unary, stream, or error response. +type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error) + +// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic +// that can rewrite the final response before it is forwarded. +// +// The response rewriter function is called during unary message forwarding, stream message +// forwarding and when errors are being forwarded. +// +// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect. +// Since this option involves making runtime changes to the response shape or type. +func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption { + return func(sm *ServeMux) { + sm.forwardResponseRewriter = fwdResponseRewriter + } +} + // WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. // // forwardResponseOption is an option that will be called on the relevant context.Context, @@ -89,6 +114,15 @@ func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { } } +// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC +// interceptors when using the direct-to-implementation registration methods and cannot rely +// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible. +func WithMiddlewares(middlewares ...Middleware) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.middlewares = append(serveMux.middlewares, middlewares...) + } +} + // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. // Configuring this will mean the generated OpenAPI output is no longer correct, and it should be // done with careful consideration. @@ -277,13 +311,14 @@ func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMux // NewServeMux returns a new ServeMux whose internal mapping is empty. 
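// A minimal sketch of how WithForwardResponseRewriter can be wired up; the
// envelope shape returned here is illustrative, not part of this package,
// and assumes the configured marshaler can serialize plain maps:
//
//	mux := runtime.NewServeMux(
//		runtime.WithForwardResponseRewriter(
//			func(ctx context.Context, resp proto.Message) (any, error) {
//				// Wrap every unary, stream, and error payload. As noted in
//				// the option's doc comment, the protoc-gen-openapiv2 output
//				// will no longer match the rewritten wire shape.
//				return map[string]any{"data": resp}, nil
//			},
//		),
//	)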
func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - errorHandler: DefaultHTTPErrorHandler, - streamErrorHandler: DefaultStreamErrorHandler, - routingErrorHandler: DefaultRoutingErrorHandler, - unescapingMode: UnescapingModeDefault, + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil }, + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, } for _, opt := range opts { @@ -305,6 +340,9 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { // Handle associates "h" to the pair of HTTP method and path pattern. func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if len(s.middlewares) > 0 { + h = chainMiddlewares(s.middlewares)(h) + } s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) } @@ -405,7 +443,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } continue } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } @@ -458,7 +496,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } _, outboundMarshaler := MarshalerForRequest(s, r) @@ -484,3 +522,16 @@ type handler struct { pat Pattern h HandlerFunc } + +func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) { + h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams) +} + +func chainMiddlewares(mws []Middleware) Middleware { + return func(next HandlerFunc) HandlerFunc { + for i := len(mws); i > 0; i-- { + next = mws[i-1](next) + } + return next + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index d549407f20fbf..f710036b350c5 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) { } // Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. +// and returns a pointer to an int64 whose value is same as the parsed integer. func Int64P(val string) (*int64, error) { i, err := Int64(val) if err != nil { @@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) { } // Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. +// and returns a pointer to an int32 whose value is same as the parsed integer. 
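// A minimal sketch of the new Middleware and HTTPPattern hooks working
// together: a logging middleware that reports which route template matched.
// chainMiddlewares applies middlewares in the order given, so the first one
// passed to WithMiddlewares becomes the outermost wrapper. The middleware
// body below is illustrative only:
//
//	logging := func(next runtime.HandlerFunc) runtime.HandlerFunc {
//		return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
//			// handleHandler stores the matched Pattern in the request
//			// context before the handler chain runs, so HTTPPattern is
//			// available here.
//			if pat, ok := runtime.HTTPPattern(r.Context()); ok {
//				log.Printf("%s %s matched %s", r.Method, r.URL.Path, pat.String())
//			}
//			next(w, r, pathParams)
//		}
//	}
//	mux := runtime.NewServeMux(runtime.WithMiddlewares(logging))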
func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index dfe7de4864ab1..38ca39cc53809 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,6 +1,6 @@ package utilities -// An OpCode is a opcode of compiled path patterns. +// OpCode is an opcode of compiled path patterns. type OpCode int // These constants are the valid values of OpCode. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index d224ab776c0cb..66aa5f2dcc578 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,7 +5,7 @@ import ( "strings" ) -// flagInterface is an cut down interface to `flag` +// flagInterface is a cut down interface to `flag` type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 0000000000000..402433593c09c --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 0000000000000..d31b378152793 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 0000000000000..a22953805c633 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,123 @@ +# This is an example goreleaser.yaml file with some sane defaults. 
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm diff --git a/LICENSES/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE b/vendor/github.com/klauspost/compress/LICENSE similarity index 67% rename from LICENSES/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE rename to vendor/github.com/klauspost/compress/LICENSE index 0718ddf0073fe..87d55747778c5 100644 --- a/LICENSES/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -1,6 +1,37 @@ -= vendor/github.com/grpc-ecosystem/go-grpc-middleware licensed under: = - - Apache License +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -188,7 +219,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2016-2017 The New York Times Company Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -201,4 +232,73 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -= vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE 7ab5c73bb7e4679b16dd7c11b3559acf + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 0000000000000..05c7359e481fe --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,700 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. 
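
For orientation, here is a minimal sketch (assumed usage, not taken from the repository's own docs) that round-trips data through the vendored zstd encoder and decoder:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Compress: Close flushes and writes the final frame.
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello, zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// Decompress and read everything back.
	dec, err := zstd.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	out, err := io.ReadAll(dec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello, zstd
}
```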
+ +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. 
See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ + See changes to v1.16.x + + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding.
https://github.com/klauspost/compress/pull/746 +
+ +
+ + See changes to v1.15.x + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression.
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. + +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster in asynchronous mode, since the goroutine allocation more effectively splits the workload. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to test when upgrading.
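To make the synchronous behavior above concrete, here is a minimal sketch (illustrative, not from the vendored sources): with concurrency set to 1 via the documented encoder/decoder options, both sides run entirely on the calling goroutine.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Concurrency 1: the encoder compresses on the calling goroutine
	// and spawns no helper goroutines.
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello, synchronous zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// The decoder behaves the same way with concurrency 1.
	dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := io.ReadAll(dec)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
}
```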
+ +
+ + See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420](https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ + See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
+ +
+ + See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size.
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ + See changes prior to v1.10.0 + +* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) +
+ +
+ + See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (fewer dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% higher throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+ +# deflate usage + +The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: + +| old import | new import | Documentation |--------------------|-----------------------------------------|--------------------| + | `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) + | `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) + | `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) + | `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and in the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages contain the same functionality as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only a minor speedup on decompression (mostly CRC32 calculation). + +Memory usage is typically 1MB for a Writer. stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers, consider using the stateless compression described below. + +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + +To disable all assembly add `-tags=noasm`. This works across all packages. + +# Stateless compression + +This package offers stateless compression as a special option for gzip/deflate. It will do compression but without maintaining any state between Write calls. + +This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. + +This is only relevant in cases where you expect to run many thousands of compressors concurrently, but with very little activity. This is *not* intended for regular web servers serving individual requests. + +Because of this, the size of actual Write calls will affect output size. + +In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. + +For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter); a small sketch also follows below. + +A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: + +```go + // replace 'ioutil.Discard' with your output. + gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) + if err != nil { + return err + } + defer gzw.Close() + + w := bufio.NewWriterSize(gzw, 4096) + defer w.Flush() + + // Write to 'w' +``` + +This will only use up to 4KB in memory when the writer is idle. Compression is almost always worse than the fastest compression level and each write will allocate (a little) memory.
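For the direct deflate route, a minimal sketch using `NewStatelessWriter` could look like the following (illustrative only; it assumes just the godoc-linked API above):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// NewStatelessWriter keeps no state between Write calls, so each
	// Write is compressed independently and idle memory use stays tiny.
	w := flate.NewStatelessWriter(&buf)
	if _, err := w.Write([]byte("hello stateless deflate")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), "bytes of raw deflate")
}
```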
+ +# Performance Update 2018 + +It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. + +The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. + +The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. + +The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). + + +## Overall differences. + +There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. + +The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library. + +This package attempts to provide a smoother transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression. + +There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. + +## Web Content + +This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. + +Since level 1 and 9 are close to being the same code, the results are quite close. But looking at the levels in-between the differences are quite big. + +Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. + +## Object files + +This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+ +The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression. + +The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. + +## Highly Compressible File + +This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. + +It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression. + +So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". + +## Medium-High Compressible + +This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. + +We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. + +## Medium Compressible + +I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. + +The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. + + +## Un-compressible Content + +This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. + + +## Huffman only compression + +This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character. + +This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). + +Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. + +The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). + +The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content.
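As a rough sketch (illustrative, not from the vendored sources), the `HuffmanOnly` level can be selected like any other level; here via the flate package's `HuffmanOnly` constant, the drop-in analogue of the standard library's:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// HuffmanOnly disables match searching entirely; compression is
	// near linear time at the cost of ratio (never better than 8:1).
	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
	if err != nil {
		panic(err)
	}
	in := bytes.Repeat([]byte("compressible text. "), 512)
	if _, err := w.Write(in); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes\n", len(in), buf.Len())
}
```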
For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4x speedup. + +For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). + +This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. + +# Other packages + +Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): + +* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. +* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. +* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. +* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. +* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. +* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. +* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md new file mode 100644 index 0000000000000..ca6685e2b72d9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Vulnerability Definition + +A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. + +Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. + +Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. + +It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. + +Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 0000000000000..ea5a692d5130f --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,85 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. +// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. + if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} + +// ShannonEntropyBits returns the number of bits minimum required to represent +// an entropy encoding of the input bytes. +// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 0000000000000..ea7324da671f6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. 
+This means that the caller should keep track of block sizes, and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                   |
+|---------------------|-------------------------------------------------------------------------------|
+| `nil`               | Everything ok, output is returned                                             |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                      |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated   |
+| `(error)`           | An internal error occurred.                                                   |
+
+As can be seen above, some errors will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage at exactly the size you got back. If you receive an error,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+Many factors affect speed. Block sizes and compressibility of the material are primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented, probably compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
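+
+As a closing, hedged round-trip sketch of the `Compress`/`Decompress` usage described above (the sample input is a placeholder and error handling is abbreviated):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("entropy coding "), 512)
+
+	var s fse.Scratch
+	comp, err := fse.Compress(in, &s)
+	switch err {
+	case fse.ErrIncompressible, fse.ErrUseRLE:
+		// A real caller would store the block raw or as RLE instead.
+		log.Fatal(err)
+	case nil:
+	default:
+		log.Fatal(err)
+	}
+
+	// comp aliases s.Out; detach it (or set s.Out to nil) before
+	// re-using the same scratch for decompression.
+	comp = append([]byte(nil), comp...)
+	s.Out = nil
+
+	out, err := fse.Decompress(comp, &s)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Note: equality is not guaranteed by the API; there are no integrity checks.
+	fmt.Println(bytes.Equal(in, out))
+}
+```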
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 0000000000000..f65eb3909cf4a --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. 
+ b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 0000000000000..e82fa3bb7b695 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,167 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 0000000000000..abade2d605279 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 0000000000000..074018d8f94cc --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + s.bw.close() + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 0000000000000..535cbadfdea19 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 0000000000000..aff942205f1c2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 0000000000000..b3d262958f856
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 0000000000000..8b6e5c66383d7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring out-of-order (OoO) operations on multiple ALUs
+(arithmetic logic units), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low level interface that allows compressing single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes, and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                   |
+|---------------------|-------------------------------------------------------------------------------|
+| `nil`               | Everything ok, output is returned                                             |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                      |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated   |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)        |
+| `(error)`           | An internal error occurred.                                                   |
+
+As can be seen above, some errors will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
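+
+As a hedged round-trip sketch of the calls above (the input is a placeholder, and setting `MaxDecodedSize` is an assumption that the caller knows an upper bound for the output):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("huffman entropy "), 512)
+
+	s := &huff0.Scratch{}
+	comp, _, err := huff0.Compress1X(in, s)
+	if err != nil {
+		// A real caller must handle ErrIncompressible/ErrUseRLE by
+		// storing the block raw or as RLE; abbreviated here.
+		log.Fatal(err)
+	}
+	// comp aliases s.Out; detach it before re-using the scratch (see below).
+	comp = append([]byte(nil), comp...)
+	s.Out = nil
+
+	// Decoding: initialize the table from the start of the block first.
+	s, remain, err := huff0.ReadTable(comp, s)
+	if err != nil {
+		log.Fatal(err)
+	}
+	s.MaxDecodedSize = len(in) // assumed known by the caller
+	out, err := s.Decompress1X(remain)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Note: equality is not guaranteed by the API; there are no integrity checks.
+	fmt.Println(bytes.Equal(in, out))
+}
+```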
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do, however, note that this information is *not* stored in the output block, and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable`, and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested, which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage at exactly the size you got back. If you receive an error,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 0000000000000..e36d9742f9437
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,229 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 0000000000000..0ebc9aaac76c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,102 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. 
value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 0000000000000..84aa3d12f0045 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,742 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. 
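+// The four compressed streams follow a 6-byte jump table that stores the
+// sizes of the first three streams as little-endian uint16 values; the
+// fourth size is implied by the remaining length. All four streams are
+// encoded with the same Huffman table.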
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
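+	// Keeping the freshly used table as prevTable allows the next call to
+	// skip writing a table definition if the re-use policy permits it and
+	// the new histogram still fits the old table.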
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src), nil +} + +func (s *Scratch) compress1xDo(dst, src []byte) []byte { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + bw.close() + return bw.out +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + idx := len(s.Out) + s.Out = s.compress1xDo(s.Out, toDo) + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. 
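+// Each stream is written to its own temporary buffer (s.tmpOut) and the
+// buffers are concatenated afterwards, since the goroutines cannot
+// append to s.Out concurrently.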
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
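+// It is the smaller of the bits needed to cover the source length and
+// the bits needed to distinguish the symbols. For example, srcLen=1000
+// gives highBit32(1000)+1 = 10, and symbolLen=60 gives
+// highBit32(59)+2 = 7, so the minimum table log is 7.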
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. + huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count() == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].setCount(1 << 30) + } + // fake entry, strong barrier + huffNode0[0].setCount(1 << 31) + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].setNbBits(0) + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits()]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol()].nBits = v.nbBits() + } + + // 
assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. + var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 0000000000000..54bd08b25c0ae --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
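+// The first byte of the input is a header: values of 128 or more mean
+// the (header-127) weights follow directly, packed two per byte as
+// 4-bit values; smaller values give the byte size of an FSE-compressed
+// weight block. The final weight is never stored; it is derived from the
+// remainder needed to bring the weight total up to a power of two.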
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + 
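+			// All table entries whose top bits equal this code decode to
+			// the same symbol, so the entry is replicated across its whole
+			// span.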
single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 0 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
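+			// Two streams are decoded in lockstep so their dependent
+			// table-lookup/shift chains can overlap. off is a uint8 and
+			// wraps to zero after 256 symbols per stream, which triggers
+			// the buffered flush below.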
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
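+	// The tails of the four streams are decoded one symbol at a time with
+	// full bounds checking, writing directly to the output slice.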
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			if br.finished() {
+				d.bufs.Put(buf)
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			d.bufs.Put(buf)
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.prevTableLog]
+	tablelog := s.prevTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered. 
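+		// The decoder only inspects the top tablelog bits, so every
+		// pattern of the remaining low bits must resolve to the same
+		// symbol and bit length.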
+ for i := uint16(0); i < (1 << ub); i++ { + vval := top | i + dec := dt[vval] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 20 { + fmt.Fprintf(w, "%d errros, stopping\n", errs) + break + } + } + if errs == 0 { + ok++ + broken-- + } + } + if broken > 0 { + fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 0000000000000..ba7e8e6b02768 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,226 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of thir main loops. +package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
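+// Outputs smaller than fallback8BitSize that decode with a table log of
+// 8 or less are routed to the pure Go byte-oriented decoder; otherwise
+// the assembler main loop is used and only the stream tails are decoded
+// in Go.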
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. 
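+// The main loop runs in assembler, using the BMI2 variant when the CPU
+// supports it; the final bits of the stream are decoded by the Go tail
+// loop below.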
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 0000000000000..c4c7ab2d1fe35 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,830 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. + +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // 
v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // 
exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + 
ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + 
+bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 0000000000000..908c17de63fc8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. 
+// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. 
+ if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
+
+	// br < 8, so uint8 is fine
+	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+	for bitsLeft > 0 {
+		br.fill()
+		if false && br.bitsRead >= 32 {
+			if br.off >= 4 {
+				v := br.in[br.off-4:]
+				v = v[:4]
+				low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+				br.value = (br.value << 32) | uint64(low)
+				br.bitsRead -= 32
+				br.off -= 4
+			} else {
+				for br.off > 0 {
+					br.value = (br.value << 8) | uint64(br.in[br.off-1])
+					br.bitsRead -= 8
+					br.off--
+				}
+			}
+		}
+		if len(dst) >= maxDecodedSize {
+			d.bufs.Put(bufs)
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= nBits
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	d.bufs.Put(bufs)
+	return dst, br.close()
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
new file mode 100644
index 0000000000000..77ecd68e0a7bc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -0,0 +1,337 @@
+// Package huff0 provides fast Huffman encoding as used in zstd.
+//
+// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+	"sync"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is the maximum uncompressed input size for a single block.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds the maximum allowed size.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+// ReusePolicy defines how tables may be reused between blocks.
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+
+	// ReusePolicyMust must allow reuse and produce smaller output.
+	ReusePolicyMust
+)
+
+// Scratch provides reusable state and temporary buffers for compression and decompression.
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per-block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	srcLen int
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse specifies the reuse policy.
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that should at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction should then at least be (input size >> WantLogLess).
+	// If WantLogLess == 0, any improvement will do.
+	WantLogLess uint8
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+	prevTableLog   uint8  // Tablelog for previous table
+	prevTable      cTable // Table used for previous compression.
+	cTable         cTable // compression table
+	dt             dTable // decompression table
+	nodes          []nodeElt
+	tmpOut         [4][]byte
+	fse            *fse.Scratch
+	decPool        sync.Pool // *[4][256]byte buffers.
+	huffWeight     [maxSymbolValue + 1]byte
+}
+
+// TransferCTable will transfer the previously used compression table.
+func (s *Scratch) TransferCTable(src *Scratch) {
+	if cap(s.prevTable) < len(src.prevTable) {
+		s.prevTable = make(cTable, 0, maxSymbolValue+1)
+	}
+	s.prevTable = s.prevTable[:len(src.prevTable)]
+	copy(s.prevTable, src.prevTable)
+	s.prevTableLog = src.prevTableLog
+}
+
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if len(in) > BlockSizeMax {
+		return nil, ErrTooBig
+	}
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = maxSymbolValue
+	}
+	if s.TableLog == 0 {
+		s.TableLog = tableLogDefault
+	}
+	if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+		return nil, fmt.Errorf("invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
+	}
+	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+		s.MaxDecodedSize = BlockSizeMax
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	s.Out = s.Out[:0]
+
+	s.OutTable = nil
+	s.OutData = nil
+	if cap(s.nodes) < huffNodesLen+1 {
+		s.nodes = make([]nodeElt, 0, huffNodesLen+1)
+	}
+	s.nodes = s.nodes[:0]
+	if s.fse == nil {
+		s.fse = &fse.Scratch{}
+	}
+	s.srcLen = len(in)
+
+	return s, nil
+}
+
+type cTable []cTableEntry
+
+func (c cTable) write(s *Scratch) error {
+	var (
+		// precomputed conversion table
+		bitsToWeight [tableLogMax + 1]byte
+		huffLog      = s.actualTableLog
+		// last weight is not saved.
+		maxSymbolValue = uint8(s.symbolLen - 1)
+		huffWeight     = s.huffWeight[:256]
+	)
+	const (
+		maxFSETableLog = 6
+	)
+	// convert to weight
+	bitsToWeight[0] = 0
+	for n := uint8(1); n < huffLog+1; n++ {
+		bitsToWeight[n] = huffLog + 1 - n
+	}
+
+	// Acquire histogram for FSE.
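+	// For reference: a weight is huffLog+1-nBits, so with huffLog=11 a
+	// 1-bit code gets weight 11 and an 11-bit code weight 1; weight 0
+	// marks an unused symbol (zstd Huffman tree description). The
+	// histogram below counts how often each weight occurs so the weight
+	// stream can be FSE-compressed.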
+ hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. + op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. 
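+//
+// As a formula, the loop below computes (a sketch of the same arithmetic):
+//
+//	minBytes = floor((7 + sum_s count[s]*log2(total/count[s])) / 8)
+//
+// i.e. the Shannon entropy of the histogram in bytes, with the initial
+// 7 bits making the final >>3 round up to whole bytes.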
+func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 0000000000000..3954c51219b2b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. +func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 0000000000000..e802579c4f967 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 0000000000000..4465fbe9e905b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 0000000000000..6050c10f4c8b4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 0000000000000..40796a49d6591 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. 
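+//
+// A hedged sketch of a call site (block is a hypothetical input); sizing
+// dst from the stored header lets Decode reuse it:
+//
+//	n, _ := DecodedLen(block)
+//	out, err := Decode(make([]byte, n), block)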
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. 
Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 0000000000000..77395a6b8b9e0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 0000000000000..13c6040a5dedb --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. 
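+//
+// Concretely: with inputMargin = 15, copying a 1-byte literal through a
+// 16-byte register may write 15 bytes past its end, and keeping 15 spare
+// bytes at the tail of both buffers makes that overrun harmless.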
+const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. 
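+//
+// A hedged usage sketch (w is any io.Writer):
+//
+//	bw := NewBufferedWriter(w)
+//	defer bw.Close()
+//	_, err := bw.Write(uncompressed)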
+func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
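+		// Header layout (framing format): byte 0 is the chunk type,
+		// bytes 1-3 the little-endian body length (checksum + payload),
+		// bytes 4-7 the little-endian CRC of the uncompressed data.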
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 0000000000000..2754bac6f1652 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. +func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 0000000000000..34d01f4aa63af --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. 
+// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
new file mode 100644
index 0000000000000..5a4412f90701a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -0,0 +1,4 @@
+module github.com/klauspost/compress
+
+go 1.19
+
diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
new file mode 100644
index 0000000000000..92e2347bbc0ea
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -0,0 +1,441 @@
+# zstd
+
+[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios.
+It offers a very wide range of compression / speed trade-offs, while being backed by a very fast decoder.
+This package implements a high performance compression algorithm, currently focused on speed.
+
+This package provides [compression](#Compressor) and [decompression](#Decompressor) of Zstandard content.
+
+This package is pure Go and without use of "unsafe".
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd)
+
+## Compressor
+
+### Status:
+
+STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively
+used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
+
+For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
+
+* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
+* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+* The "Better" compression ratio is roughly equivalent to zstd level 7.
+* The "Best" compression ratio is roughly equivalent to zstd level 11.
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
+The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
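+
+As a minimal sketch of the levels listed above (the helper name is illustrative;
+the options are from this package), an encoder at the "best" predefined level
+can be created like this:
+
+```Go
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// newBestWriter returns an encoder configured for the highest predefined
+// compression level. SpeedFastest, SpeedDefault, SpeedBetterCompression and
+// SpeedBestCompression correspond to the four ratios listed above.
+func newBestWriter(out io.Writer) (*zstd.Encoder, error) {
+	return zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
+}
+```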
+
+
+### Usage
+
+An Encoder can be used for either compressing a stream via the
+`io.WriteCloser` interface supported by the Encoder or as multiple independent
+tasks via the `EncodeAll` function.
+Smaller encodes are encouraged to use the `EncodeAll` function.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do the following:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+	enc, err := zstd.NewWriter(out)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```
+
+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`,
+which will compress input as each block is completed, blocking on writes until each has completed.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`,
+as the sketch below illustrates.
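+
+To illustrate the concatenation property, here is a minimal sketch (the helper
+name is illustrative) that encodes two buffers as independent blocks and
+decodes the concatenated result in one call:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+var enc, _ = zstd.NewWriter(nil)
+var dec, _ = zstd.NewReader(nil)
+
+// roundTrip encodes a and b separately with EncodeAll; decoding the
+// concatenated blocks yields the combined input, i.e. append(a, b...).
+func roundTrip(a, b []byte) ([]byte, error) {
+	blocks := enc.EncodeAll(a, nil)
+	blocks = enc.EncodeAll(b, blocks)
+	return dec.DecodeAll(blocks, nil)
+}
+```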
+
+Especially when encoding blocks, you should take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", and level 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+
+```
+Silesia Corpus:
+http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
+
+This package:
+file        out level insize    outsize   millis mb/s
+silesia.tar zskp 1    211947520 73821326  634    318.47
+silesia.tar zskp 2    211947520 67655404  1508   133.96
+silesia.tar zskp 3    211947520 64746933  3000   67.37
+silesia.tar zskp 4    211947520 60073508  16926  11.94
+
+cgo zstd:
+silesia.tar zstd 1    211947520 73605392  543    371.56
+silesia.tar zstd 3    211947520 66793289  864    233.68
+silesia.tar zstd 6    211947520 62916450  1913   105.66
+silesia.tar zstd 9    211947520 60212393  5063   39.92
+
+gzip, stdlib/this package:
+silesia.tar gzstd 1   211947520 80007735  1498   134.87
+silesia.tar gzkp  1   211947520 80088272  1009   200.31
+
+GOB stream of binary data. Highly compressible.
+https://files.klauspost.com/compress/gob-stream.7z
+
+file       out  level insize     outsize   millis mb/s
+gob-stream zskp 1     1911399616 233948096 3230   564.34
+gob-stream zskp 2     1911399616 203997694 4997   364.73
+gob-stream zskp 3     1911399616 173526523 13435  135.68
+gob-stream zskp 4     1911399616 162195235 47559  38.33
+
+gob-stream zstd 1     1911399616 249810424 2637   691.26
+gob-stream zstd 3     1911399616 208192146 3490   522.31
+gob-stream zstd 6     1911399616 193632038 6687   272.56
+gob-stream zstd 9     1911399616 177620386 16175  112.70
+
+gob-stream gzstd 1    1911399616 357382013 9046   201.49
+gob-stream gzkp  1    1911399616 359136669 4885   373.08
+
+The test data for the Large Text Compression Benchmark is the first
+10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
+http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. +https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. 
+Both usages are accessed by creating a `Decoder`.
+
+For streaming, a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+	d, err := zstd.NewReader(in)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+
+	// Copy content...
+	_, err = io.Copy(out, d)
+	return err
+}
+```
+
+When running with default settings, it is important to call `Close()` when you no longer need the Reader,
+so that its goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is being requested.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+The dictionary will be used automatically for the data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time, so be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use `Reset(r io.Reader) error` to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* call this when you no longer need the Reader.
+
+For decompressing smaller buffers, a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made.
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder will create goroutines that:
+
+1) Read input and split it into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
+Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
+### Benchmarks
+
+The first two are streaming decodes and the rest are smaller inputs.
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
+```
+BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
+BenchmarkDecoderEnwik9-32  1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32        67356   17857 ns/op 10321.96 MB/s 22.48 pct  102 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32    266656  4421 ns/op  26823.21 MB/s 11.89 pct  19 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32     20992   56842 ns/op 8477.17 MB/s  39.90 pct  754 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32       27456   43932 ns/op 9714.01 MB/s  33.27 pct  524 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32     78432   15047 ns/op 8319.15 MB/s  40.34 pct  66 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32      65800   18436 ns/op 8249.63 MB/s  37.75 pct  88 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32         102993  11523 ns/op 35546.09 MB/s 3.637 pct  143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32   1000000 1070 ns/op  95720.98 MB/s 80.53 pct  3 B/op   0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32   749802  1752 ns/op  70272.35 MB/s 100.0 pct  5 B/op   0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32         22640   52934 ns/op 13263.37 MB/s 26.25 pct  1014 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32             226412  5232 ns/op  19572.27 MB/s 14.49 pct  20 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32    923041  1276 ns/op  3194.71 MB/s  31.26 pct  0 B/op   0 allocs/op
+```
+
+This reflects the performance around May 2022, but this may be out of date.
+
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported, it can be useful for internal files.
+
+To support the compression and decompression of these files, you must register a compressor and decompressor.
+
+It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT to
+use the global registration functions. The main reason for this is that two registrations from
+different packages will result in a panic.
+
+It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip
+files concurrently, and using a single instance will allow reusing some resources.
+
+See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for
+how to compress and decompress files inside zip archives.
+
+# Contributions
+
+Contributions are always welcome.
+For new features/fixes, remember to add tests, and for performance enhancements include benchmarks.
+
+For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
+
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
new file mode 100644
index 0000000000000..25ca983941dbc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -0,0 +1,136 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math/bits"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	value    uint64 // Maybe use [16]byte, but shifting is awkward.
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) int {
+	if n == 0 /*|| b.bitsRead >= 64 */ {
+		return 0
+	}
+	return int(b.get32BitsFast(n))
+}
+
+// get32BitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) get32BitsFast(n uint8) uint32 {
+	const regMask = 64 - 1
+	v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	v := b.in[len(b.in)-4:]
+	b.in = b.in[:len(b.in)-4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+}
+
+// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	v := b.in[len(b.in)-8:]
+	b.in = b.in[:len(b.in)-8]
+	b.value = binary.LittleEndian.Uint64(v)
+	b.bitsRead = 0
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if len(b.in) >= 4 {
+		v := b.in[len(b.in)-4:]
+		b.in = b.in[:len(b.in)-4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		return
+	}
+
+	b.bitsRead -= uint8(8 * len(b.in))
+	for len(b.in) > 0 {
+		b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
+		b.in = b.in[:len(b.in)-1]
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return len(b.in) == 0 && b.bitsRead >= 64
+}
+
+// overread returns true if more bits have been requested than are on the stream.
+func (b *bitReader) overread() bool {
+	return b.bitsRead > 64
+}
+
+// remain returns the number of bits remaining.
+func (b *bitReader) remain() uint {
+	return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
+}
+
+// close closes the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if !b.finished() {
+		return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
+	}
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
new file mode 100644
index 0000000000000..1952f175b0dcf
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -0,0 +1,112 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package zstd
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 contains bitmasks for the low n bits. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+var bitMask32 = [32]uint32{
+	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits32NC will add up to 31 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits64NC will add up to 64 bits.
+// There must be space for 32 bits.
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 0000000000000..03744fbc76534 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,729 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. 
+	maxOffsetBits = 30
+)
+
+var (
+	huffDecoderPool = sync.Pool{New: func() interface{} {
+		return &huff0.Scratch{}
+	}}
+
+	fseDecoderPool = sync.Pool{New: func() interface{} {
+		return &fseDecoder{}
+	}}
+)
+
+type blockDec struct {
+	// Raw source data of the block.
+	data        []byte
+	dataStorage []byte
+
+	// Destination of the decoded data.
+	dst []byte
+
+	// Buffer for literals data.
+	literalBuf []byte
+
+	// Window size of the block.
+	WindowSize uint64
+
+	err error
+
+	// Check against this crc, if hasCRC is true.
+	checkCRC uint32
+	hasCRC   bool
+
+	// Frame to use for singlethreaded decoding.
+	// Should not be used by the decoder itself since parent may be another frame.
+	localFrame *frameDec
+
+	sequence []seqVals
+
+	async struct {
+		newHist  *history
+		literals []byte
+		seqData  []byte
+		seqSize  int // Size of uncompressed sequences
+		fcs      uint64
+	}
+
+	// Block is RLE, this is the size.
+	RLESize uint32
+
+	Type blockType
+
+	// Is this the last block of a frame?
+	Last bool
+
+	// Use less memory
+	lowMem bool
+}
+
+func (b *blockDec) String() string {
+	if b == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+	b := blockDec{
+		lowMem: lowMem,
+	}
+	return &b
+}
+
+// reset will reset the block.
+// Input must be a start of a block and will be at the end of the block when returned.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+	b.WindowSize = windowSize
+	tmp, err := br.readSmall(3)
+	if err != nil {
+		println("Reading block header:", err)
+		return err
+	}
+	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+	b.Last = bh&1 != 0
+	b.Type = blockType((bh >> 1) & 3)
+	// find size.
+	cSize := int(bh >> 3)
+	maxSize := maxCompressedBlockSizeAlloc
+	switch b.Type {
+	case blockTypeReserved:
+		return ErrReservedBlockType
+	case blockTypeRLE:
+		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+			if debugDecoder {
+				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrWindowSizeExceeded
+		}
+		b.RLESize = uint32(cSize)
+		if b.lowMem {
+			maxSize = cSize
+		}
+		cSize = 1
+	case blockTypeCompressed:
+		if debugDecoder {
+			println("Data size on stream:", cSize)
+		}
+		b.RLESize = 0
+		maxSize = maxCompressedBlockSizeAlloc
+		if windowSize < maxCompressedBlockSize && b.lowMem {
+			maxSize = int(windowSize) + compressedBlockOverAlloc
+		}
+		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+			if debugDecoder {
+				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrCompressedSizeTooBig
+		}
+		// Empty compressed blocks must at least be 2 bytes
+		// for Literals_Block_Type and one for Sequences_Section_Header.
+		if cSize < 2 {
+			return ErrBlockTooSmall
+		}
+	case blockTypeRaw:
+		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+			if debugDecoder {
+				printf("raw block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrWindowSizeExceeded
+		}
+
+		b.RLESize = 0
+		// We do not need a destination for raw blocks.
+		maxSize = -1
+	default:
+		panic("Invalid block type")
+	}
+
+	// Read block data.
+	if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
+		// byteBuf doesn't need a destination buffer.
+		if b.lowMem || cSize > maxCompressedBlockSize {
+			b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
+		} else {
+			b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
+		}
+	}
+	b.data, err = br.readBig(cSize, b.dataStorage)
+	if err != nil {
+		if debugDecoder {
+			println("Reading block:", err, "(", cSize, ")", len(b.data))
+			printf("%T", br)
+		}
+		return err
+	}
+	if cap(b.dst) <= maxSize {
+		b.dst = make([]byte, 0, maxSize+1)
+	}
+	return nil
+}
+
+// sendErr will make the decoder return err on this frame.
+func (b *blockDec) sendErr(err error) {
+	b.Last = true
+	b.Type = blockTypeReserved
+	b.err = err
+}
+
+// Close will release resources.
+// Closed blockDec cannot be reset.
+func (b *blockDec) Close() {
+}
+
+// decodeBuf decodes the block, appending the output to the history.
+func (b *blockDec) decodeBuf(hist *history) error {
+	switch b.Type {
+	case blockTypeRLE:
+		if cap(b.dst) < int(b.RLESize) {
+			if b.lowMem {
+				b.dst = make([]byte, b.RLESize)
+			} else {
+				b.dst = make([]byte, maxCompressedBlockSize)
+			}
+		}
+		b.dst = b.dst[:b.RLESize]
+		v := b.data[0]
+		for i := range b.dst {
+			b.dst[i] = v
+		}
+		hist.appendKeep(b.dst)
+		return nil
+	case blockTypeRaw:
+		hist.appendKeep(b.data)
+		return nil
+	case blockTypeCompressed:
+		saved := b.dst
+		// Append directly to history
+		if hist.ignoreBuffer == 0 {
+			b.dst = hist.b
+			hist.b = nil
+		} else {
+			b.dst = b.dst[:0]
+		}
+		err := b.decodeCompressed(hist)
+		if debugDecoder {
+			println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
+		}
+		if hist.ignoreBuffer == 0 {
+			hist.b = b.dst
+			b.dst = saved
+		} else {
+			hist.appendKeep(b.dst)
+		}
+		return err
+	case blockTypeReserved:
+		// Used for returning errors.
+		return b.err
+	default:
+		panic("Invalid block type")
+	}
+}
+
+func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
+	// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
+	if len(in) < 2 {
+		return in, ErrBlockTooSmall
+	}
+
+	litType := literalsBlockType(in[0] & 3)
+	var litRegenSize int
+	var litCompSize int
+	sizeFormat := (in[0] >> 2) & 3
+	var fourStreams bool
+	var literals []byte
+	switch litType {
+	case literalsBlockRaw, literalsBlockRLE:
+		switch sizeFormat {
+		case 0, 2:
+			// Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte.
+			litRegenSize = int(in[0] >> 3)
+			in = in[1:]
+		case 1:
+			// Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes.
+			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4)
+			in = in[2:]
+		case 3:
+			// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
+			if len(in) < 3 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return in, ErrBlockTooSmall
+			}
+			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
+			in = in[3:]
+		}
+	case literalsBlockCompressed, literalsBlockTreeless:
+		switch sizeFormat {
+		case 0, 1:
+			// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to 0x%x, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil + } + br := seqs.br + if 
br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 0000000000000..32a7f401d5d2a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,909 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. 
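The blockHeader helpers above pack the three-byte zstd block header: bit 0 is Last_Block, bits 1-2 are Block_Type, and the remaining 21 bits carry the size, written little-endian by appendTo. A standalone sketch of the same packing (`packBlockHeader` is an illustrative helper, not part of the package):

```go
package main

import "fmt"

// packBlockHeader builds the 3-byte header that setLast/setType/setSize
// assemble above: bit 0 = Last_Block, bits 1-2 = Block_Type,
// bits 3-23 = Block_Size, emitted little-endian.
func packBlockHeader(last bool, blockType uint32, size uint32) [3]byte {
	h := size << 3
	h |= blockType << 1
	if last {
		h |= 1
	}
	return [3]byte{uint8(h), uint8(h >> 8), uint8(h >> 16)}
}

func main() {
	// A last, raw (type 0) block of 100 bytes.
	fmt.Printf("% x\n", packBlockHeader(true, 0, 100)) // 21 03 00
}
```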
+type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. 
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. + if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 16 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw. 
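matchOffset above implements zstd's repeat-offset scheme: an offset equal to one of the three most recently used offsets is sent as a small code (1-3) instead of the full value, and encoder and decoder rotate the recent-offset queue identically so the codes stay in sync. A sketch of the branch taken when the sequence carries literals (`matchOffsetCode` is an illustrative stand-in; the zero-literal branch above shifts the table and treats `recentOffsets[0]-1` specially):

```go
package main

import "fmt"

// matchOffsetCode maps an absolute offset to its transmitted code for
// sequences that carry literals: recent offsets become codes 1-3, and
// everything else is sent as offset+3 after rotating the queue.
func matchOffsetCode(offset uint32, recent *[3]uint32) uint32 {
	switch offset {
	case recent[0]:
		return 1 // most recent: queue unchanged
	case recent[1]:
		recent[0], recent[1] = offset, recent[0]
		return 2
	case recent[2]:
		recent[0], recent[1], recent[2] = offset, recent[0], recent[1]
		return 3
	default:
		recent[0], recent[1], recent[2] = offset, recent[0], recent[1]
		return offset + 3
	}
}

func main() {
	recent := [3]uint32{1, 4, 8} // initial offsets, as in initNewEncode
	fmt.Println(matchOffsetCode(4, &recent)) // 2: matches recent[1]
	fmt.Println(matchOffsetCode(4, &recent)) // 1: now the most recent
	fmt.Println(matchOffsetCode(7, &recent)) // 10: no match, 7+3
}
```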
+ var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 6) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. 
+ bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 16 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... + if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). 
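The switch above writes the sequence count in the spec's 1-3 byte format; prepareSequences in blockdec.go parses the same encoding. A round-trippable sketch of the writer side (`appendSeqCount` is an illustrative helper mirroring the vendored logic, including its boundary choice):

```go
package main

import "fmt"

// appendSeqCount emits the Sequences_Section count: one byte for 0-127,
// two bytes (first byte 128 plus the high bits) for values below 0x7f00,
// and 0xff followed by a little-endian uint16 of n-0x7f00 otherwise.
func appendSeqCount(dst []byte, n int) []byte {
	switch {
	case n < 128:
		return append(dst, uint8(n))
	case n < 0x7f00:
		return append(dst, 128+uint8(n>>8), uint8(n))
	default:
		n -= 0x7f00
		return append(dst, 255, uint8(n), uint8(n>>8))
	}
}

func main() {
	fmt.Printf("% x\n", appendSeqCount(nil, 100))   // 64
	fmt.Printf("% x\n", appendSeqCount(nil, 1000))  // 83 e8
	fmt.Printf("% x\n", appendSeqCount(nil, 40000)) // ff 40 1d
}
```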
+ nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... 
+	for seq >= 0 {
+		s = b.sequences[seq]
+
+		ofB := ofTT[s.ofCode]
+		wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+		//of.encode(ofB)
+		nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+		dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+		wr.addBits16NC(of.state, uint8(nbBitsOut))
+		of.state = of.stateTable[dstState]
+
+		// Accumulate extra bits.
+		outBits := ofB.outBits & 31
+		extraBits := uint64(s.offset & bitMask32[outBits])
+		extraBitsN := outBits
+
+		mlB := mlTT[s.mlCode]
+		//ml.encode(mlB)
+		nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+		dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+		wr.addBits16NC(ml.state, uint8(nbBitsOut))
+		ml.state = ml.stateTable[dstState]
+
+		outBits = mlB.outBits & 31
+		extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+		extraBitsN += outBits
+
+		llB := llTT[s.llCode]
+		//ll.encode(llB)
+		nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+		dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+		wr.addBits16NC(ll.state, uint8(nbBitsOut))
+		ll.state = ll.stateTable[dstState]
+
+		outBits = llB.outBits & 31
+		extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+		extraBitsN += outBits
+
+		wr.flush32()
+		wr.addBits64NC(extraBits, extraBitsN)
+
+		if debugSequences {
+			println("Encoded seq", seq, s)
+		}
+
+		seq--
+	}
+	ml.flush(mlEnc.actualTableLog)
+	of.flush(ofEnc.actualTableLog)
+	ll.flush(llEnc.actualTableLog)
+	wr.close()
+	b.output = wr.out
+
+	// Maybe even add a bigger margin.
+	if len(b.output)-3-bhOffset >= b.size {
+		// Discard and encode as raw block.
+		b.output = b.encodeRawTo(b.output[:bhOffset], org)
+		b.popOffsets()
+		b.litEnc.Reuse = huff0.ReusePolicyNone
+		return nil
+	}
+
+	// Size is output minus block header.
+	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+	if debugEncoder {
+		println("Rewriting block header", bh)
+	}
+	_ = bh.appendTo(b.output[bhOffset:bhOffset])
+	b.coders.setPrev(llEnc, mlEnc, ofEnc)
+	return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+	if len(b.sequences) == 0 {
+		// nothing to do
+		return
+	}
+	if len(b.sequences) > math.MaxUint16 {
+		panic("can only encode up to 64K sequences")
+	}
+	// No bounds checks after here:
+	llH := b.coders.llEnc.Histogram()
+	ofH := b.coders.ofEnc.Histogram()
+	mlH := b.coders.mlEnc.Histogram()
+	for i := range llH {
+		llH[i] = 0
+	}
+	for i := range ofH {
+		ofH[i] = 0
+	}
+	for i := range mlH {
+		mlH[i] = 0
+	}
+
+	var llMax, ofMax, mlMax uint8
+	for i := range b.sequences {
+		seq := &b.sequences[i]
+		v := llCode(seq.litLen)
+		seq.llCode = v
+		llH[v]++
+		if v > llMax {
+			llMax = v
+		}
+
+		v = ofCode(seq.offset)
+		seq.ofCode = v
+		ofH[v]++
+		if v > ofMax {
+			ofMax = v
+		}
+
+		v = mlCode(seq.matchLen)
+		seq.mlCode = v
+		mlH[v]++
+		if v > mlMax {
+			mlMax = v
+			if debugAsserts && mlMax > maxMatchLengthSymbol {
+				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+			}
+		}
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	if debugAsserts && mlMax > maxMatchLengthSymbol {
+		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+	}
+	if debugAsserts && ofMax > maxOffsetBits {
+		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+	}
+	if debugAsserts && llMax > maxLiteralLengthSymbol {
+		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+	}
+
+	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 0000000000000..01a01e486e188
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer
-type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 0000000000000..55a388553df5d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. 
+ readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. + skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := io.ReadFull(r.r, r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 0000000000000..0e59a242d8dc0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. 
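byteBuf and readerWrapper above adapt in-memory slices and io.Readers to the same small interface, and the byteReader that follows reads little-endian integers with hand-inlined byte assembly (the `b2 = b2[:4]` re-slice appears intended to collapse the bounds checks into one). The result is an ordinary little-endian read, as this sketch shows:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// The manual byte assembly used by byteReader.Uint32 is equivalent to a
// standard-library little-endian read.
func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12}
	manual := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
	stdlib := binary.LittleEndian.Uint32(b)
	fmt.Printf("%#x %#x\n", manual, stdlib) // 0x12345678 0x12345678
}
```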
+func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 0000000000000..6a5a2988b6f36 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,261 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. 
+ // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
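Header.Decode and DecodeAndStrip below allow inspecting a frame without decompressing it. A usage sketch; the frame bytes here are hand-assembled for an empty single-segment frame (magic, frame header descriptor 0x20, zero content size, one empty raw last block) and are an assumption of this example, not output copied from the library:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Hand-built sample: magic, fhd 0x20 (single segment, FCS present),
	// content size 0, then a 3-byte last raw block header of size 0.
	frame := []byte{0x28, 0xb5, 0x2f, 0xfd, 0x20, 0x00, 0x01, 0x00, 0x00}

	var h zstd.Header
	if err := h.Decode(frame); err != nil {
		panic(err)
	}
	fmt.Println("single segment:", h.SingleSegment)                        // true
	fmt.Println("content size known:", h.HasFCS, "size:", h.FrameContentSize) // true 0
	fmt.Println("first block ok:", h.FirstBlock.OK, "last:", h.FirstBlock.Last) // true true
}
```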
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { + *h = Header{} + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return nil, ErrMagicMismatch + } + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return in[4:], nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return nil, errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return in, nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
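The Window_Descriptor handling above computes the window as a power-of-two base from the top five bits plus eighths of that base from the low three bits. A sketch of just that computation (`windowSize` is an illustrative helper):

```go
package main

import "fmt"

// windowSize reproduces the Window_Descriptor math: windowLog comes from
// the top 5 bits (offset by the 1 KiB minimum), and the low 3 bits add
// that many eighths of the base.
func windowSize(wd byte) uint64 {
	windowLog := 10 + (wd >> 3)
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&0x7)
	return windowBase + windowAdd
}

func main() {
	fmt.Println(windowSize(0x00)) // 1024: the 1 KiB minimum
	fmt.Println(windowSize(0x61)) // 4718592: 4 MiB base + 1/8
}
```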
+ cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return in, nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 0000000000000..bbca17234aa00 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,948 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + dicts map[uint32]*dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. 
It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. + d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... 
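Read and Reset above implement the streaming side, and the intended pattern is one long-lived Decoder reused across streams via Reset rather than a NewReader per stream. A usage sketch (`decompressAll` is an illustrative helper; passing a nil reader at construction and supplying the first stream via Reset is the documented pattern):

```go
package main

import (
	"bytes"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// decompressAll reuses a single Decoder for many streams, avoiding the
// allocations of repeated NewReader calls. streams stand in for real
// zstd-compressed inputs.
func decompressAll(streams [][]byte) error {
	dec, err := zstd.NewReader(nil) // nil reader; Reset supplies each stream
	if err != nil {
		return err
	}
	defer dec.Close()
	for _, s := range streams {
		if err := dec.Reset(bytes.NewReader(s)); err != nil {
			return err
		}
		if _, err := io.Copy(os.Stdout, dec); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := decompressAll(nil); err != nil {
		panic(err)
	}
}
```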
+ d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
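DecodeAll above is the stateless entry point: it appends to dst, so a pre-sized buffer avoids per-call allocations, and the same Decoder may serve many concurrent DecodeAll calls. A round-trip sketch; `zstd.NewWriter` and `EncodeAll` come from the encoder half of this package (not shown in this diff) and are assumed here only to produce valid input:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Encoder side, assumed from the package's encoder API, used only
	// to produce a valid compressed blob.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll([]byte("hello zstd"), nil)
	enc.Close()

	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	// DecodeAll appends to dst; a pre-allocated buffer avoids growth.
	out, err := dec.DecodeAll(compressed, make([]byte, 0, 64))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello zstd
}
```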
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + d.current.crc.Write(next.b) + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. 
+func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 0000000000000..774c5f00fe420 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,169 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. 
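+// That is, the size of the internal pool of block decoders that both
+// DecodeAll and the stream decoder draw from.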
+// When decoding blocks with DecodeAll, this will limit the number
+// of possible concurrently running decodes.
+// When decoding streams, this will limit the number of
+// inflight blocks.
+// When decoding streams and setting maximum to 1,
+// no async decoding will be done.
+// When a value of 0 is provided GOMAXPROCS will be used.
+// By default this will be set to 4 or GOMAXPROCS, whichever is lower.
+func WithDecoderConcurrency(n int) DOption {
+	return func(o *decoderOptions) error {
+		if n < 0 {
+			return errors.New("concurrency must not be negative")
+		}
+		if n == 0 {
+			o.concurrent = runtime.GOMAXPROCS(0)
+		} else {
+			o.concurrent = n
+		}
+		return nil
+	}
+}
+
+// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
+// non-streaming operations, or a maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// Maximum is 1 << 63 bytes. Default is 64GiB.
+func WithDecoderMaxMemory(n uint64) DOption {
+	return func(o *decoderOptions) error {
+		if n == 0 {
+			return errors.New("WithDecoderMaxMemory must be at least 1")
+		}
+		if n > 1<<63 {
+			return errors.New("WithDecoderMaxMemory must be less than 1 << 63")
+		}
+		o.maxDecodedSize = n
+		return nil
+	}
+}
+
+// WithDecoderDicts allows registering one or more dictionaries for the decoder.
+//
+// Each slice in dicts must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// If several dictionaries with the same ID are provided, the last one will be used.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+func WithDecoderDicts(dicts ...[]byte) DOption {
+	return func(o *decoderOptions) error {
+		for _, b := range dicts {
+			d, err := loadDict(b)
+			if err != nil {
+				return err
+			}
+			o.dicts = append(o.dicts, d)
+		}
+		return nil
+	}
+}
+
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
+// The slice content can be arbitrary data.
+func WithDecoderDictRaw(id uint32, content []byte) DOption {
+	return func(o *decoderOptions) error {
+		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+			return fmt.Errorf("dictionary of size %d exceeds the 2GiB limit", len(content))
+		}
+		o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
+		return nil
+	}
+}
+
+// WithDecoderMaxWindow allows setting a maximum window size for decodes.
+// This allows rejecting inputs that would cause large memory usage.
+// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
+// If WithDecoderMaxMemory is set to a lower value, that will be used instead.
+// Default is 512MB; the maximum is ~3.75 TB, as per the zstandard spec.
+func WithDecoderMaxWindow(size uint64) DOption {
+	return func(o *decoderOptions) error {
+		if size < MinWindowSize {
+			return errors.New("WithDecoderMaxWindow must be at least 1KB (1024 bytes)")
+		}
+		if size > (1<<41)+7*(1<<38) {
+			return errors.New("WithDecoderMaxWindow must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
+		}
+		o.maxWindowSize = size
+		return nil
+	}
+}
+
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
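+//
+// Usage sketch (illustrative; compressed is assumed to hold a zstd frame,
+// and exceeding the cap typically fails with ErrDecoderSizeExceeded):
+//
+//	dec, err := NewReader(nil, WithDecodeAllCapLimit(true))
+//	if err != nil {
+//		return err
+//	}
+//	defer dec.Close()
+//	dst := make([]byte, 0, 1<<20) // permit at most 1 MiB of output
+//	out, err := dec.DecodeAll(compressed, dst)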
+func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 0000000000000..b7b83164bc76d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,565 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
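+		// (readNCount parsed the normalized symbol counts and transform built
+		// the decoding table; marking it preDefined keeps later blocks from
+		// recycling it as an ordinary previous-block table.)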
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. + DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) + } + } + printf := func(s string, args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) 
+ } + } + + if int64(len(hist)) > dictMaxLength { + return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) + } + if len(hist) < 8 { + return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) + } + if len(contents) == 0 { + return nil, errors.New("no content provided") + } + d := dict{ + id: o.ID, + litEnc: nil, + llDec: sequenceDec{}, + ofDec: sequenceDec{}, + mlDec: sequenceDec{}, + offsets: o.Offsets, + content: hist, + } + block := blockEnc{lowMem: false} + block.init() + enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) + if o.Level != 0 { + eOpts := encoderOptions{ + level: o.Level, + blockSize: maxMatchLen, + windowSize: maxMatchLen, + dict: &d, + lowMem: false, + } + enc = eOpts.encoder() + } else { + o.Level = SpeedBestCompression + } + var ( + remain [256]int + ll [256]int + ml [256]int + of [256]int + ) + addValues := func(dst *[256]int, src []byte) { + for _, v := range src { + dst[v]++ + } + } + addHist := func(dst *[256]int, src *[256]uint32) { + for i, v := range src { + dst[i] += int(v) + } + } + seqs := 0 + nUsed := 0 + litTotal := 0 + newOffsets := make(map[uint32]int, 1000) + for _, b := range contents { + block.reset(nil) + if len(b) < 8 { + continue + } + nUsed++ + enc.Reset(&d, true) + enc.Encode(&block, b) + addValues(&remain, block.literals) + litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } + seqs += len(block.sequences) + block.genCodes() + addHist(&ll, block.coders.llEnc.Histogram()) + addHist(&ml, block.coders.mlEnc.Histogram()) + addHist(&of, block.coders.ofEnc.Histogram()) + for i, seq := range block.sequences { + if i > 3 { + break + } + offset := seq.offset + if offset == 0 { + continue + } + if int(offset) >= len(o.History) { + continue + } + if offset > 3 { + newOffsets[offset-3]++ + } else { + newOffsets[uint32(o.Offsets[offset-1])]++ + } + } + } + // Find most used offsets. + var sortedOffsets []uint32 + for k := range newOffsets { + sortedOffsets = append(sortedOffsets, k) + } + sort.Slice(sortedOffsets, func(i, j int) bool { + a, b := sortedOffsets[i], sortedOffsets[j] + if a == b { + // Prefer the longer offset + return sortedOffsets[i] > sortedOffsets[j] + } + return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] + }) + if len(sortedOffsets) > 3 { + if debug { + print("Offsets:") + for i, v := range sortedOffsets { + if i > 20 { + break + } + printf("[%d: %d],", v, newOffsets[v]) + } + println("") + } + + sortedOffsets = sortedOffsets[:3] + } + for i, v := range sortedOffsets { + o.Offsets[i] = int(v) + } + if debug { + println("New repeat offsets", o.Offsets) + } + + if nUsed == 0 || seqs == 0 { + return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) + } + if debug { + println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) + } + if seqs/nUsed < 512 { + // Use 512 as minimum. + nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } + } + copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { + hist := dst.Histogram() + var maxSym uint8 + var maxCount int + var fakeLength int + for i, v := range src { + if v > 0 { + v = v / nUsed + if v == 0 { + v = 1 + } + } + if v > maxCount { + maxCount = v + } + if v != 0 { + maxSym = uint8(i) + } + fakeLength += v + hist[i] = uint32(v) + } + + // Ensure we aren't trying to represent RLE. 
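+		// (If one symbol accounted for the entire scaled histogram, the table
+		// would normalize to RLE, which cannot be stored as a dictionary
+		// table, so a second symbol with count 1 is faked in below.)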
+ if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := litTotal + if avgSize > huff0.BlockSizeMax/2 { + avgSize = huff0.BlockSizeMax / 2 + } + huffBuff := make([]byte, 0, avgSize) + // Target size + div := litTotal / avgSize + if div < 1 { + div = 1 + } + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := 0; tries < 255; tries++ { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := 0; i < 128; i++ { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
+ } + } + } + + var out bytes.Buffer + out.Write([]byte(dictMagic)) + out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) + out.Write(scratch.OutTable) + if debug { + println("huff table:", len(scratch.OutTable), "bytes") + println("of table:", len(ofTable), "bytes") + println("ml table:", len(mlTable), "bytes") + println("ll table:", len(llTable), "bytes") + } + out.Write(ofTable) + out.Write(mlTable) + out.Write(llTable) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) + out.Write(hist) + if debug { + _, err := loadDict(out.Bytes()) + if err != nil { + panic(err) + } + i, err := InspectDictionary(out.Bytes()) + if err != nil { + panic(err) + } + println("ID:", i.ID()) + println("Content size:", i.ContentSize()) + println("Encoder:", i.LitEncoder() != nil) + println("Offsets:", i.Offsets()) + var totalSize int + for _, b := range contents { + totalSize += len(b) + } + + encWith := func(opts ...EOption) int { + enc, err := NewWriter(nil, opts...) + if err != nil { + panic(err) + } + defer enc.Close() + var dst []byte + var totalSize int + for _, b := range contents { + dst = enc.EncodeAll(b, dst[:0]) + totalSize += len(dst) + } + return totalSize + } + plain := encWith(WithEncoderLevel(o.Level)) + withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) + println("Input size:", totalSize) + println("Plain Compressed:", plain) + println("Dict Compressed:", withDict) + println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 0000000000000..5ca46038ad9fc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,173 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. 
+func (e *fastBase) Block() *blockEnc {
+	return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+	if debugAsserts && e.cur > e.bufferReset {
+		panic(fmt.Sprintf("e.cur (%d) > bufferReset (%d)", e.cur, e.bufferReset))
+	}
+	// Check if we already have space.
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			e.ensureHist(len(src))
+		} else {
+			if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
+				panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
+			}
+			// Move down
+			offset := int32(len(e.hist)) - e.maxMatchOff
+			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:e.maxMatchOff]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+// ensureHist will ensure that history can keep at least this many bytes.
+func (e *fastBase) ensureHist(n int) {
+	if cap(e.hist) >= n {
+		return
+	}
+	l := e.maxMatchOff
+	if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
+		l += maxCompressedBlockSize
+	} else {
+		l += e.maxMatchOff
+	}
+	// Make it at least 1MB.
+	if l < 1<<20 && !e.lowMem {
+		l = 1 << 20
+	}
+	// Make it at least the requested size.
+	if l < int32(n) {
+		l = int32(n)
+	}
+	e.hist = make([]byte, 0, l)
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+	enc.reset(e.blk)
+	e.blk = enc
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+	if debugAsserts {
+		if s < 0 {
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
+		}
+		if t < 0 {
+			err := fmt.Sprintf("t (%d) < 0", t)
+			panic(err)
+		}
+		if s-t > e.maxMatchOff {
+			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+			panic(err)
+		}
+		if len(src)-int(s) > maxCompressedBlockSize {
+			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+		}
+	}
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+	if e.blk == nil {
+		e.blk = &blockEnc{lowMem: e.lowMem}
+		e.blk.init()
+	} else {
+		e.blk.reset(nil)
+	}
+	e.blk.initNewEncode()
+	if e.crc == nil {
+		e.crc = xxhash.New()
+	} else {
+		e.crc.Reset()
+	}
+	e.blk.dictLitEnc = nil
+	if d != nil {
+		low := e.lowMem
+		if singleBlock {
+			e.lowMem = true
+		}
+		e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
+		e.lowMem = low
+	}
+
+	// We offset current position so everything will be out of reach.
+	// If above reset line, history will be purged.
+	if e.cur < e.bufferReset {
+		e.cur += e.maxMatchOff + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+	if d != nil {
+		// Set offsets (currently not used)
+		for i, off := range d.offsets {
+			e.blk.recentOffsets[i] = uint32(off)
+			e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+		}
+		// Transfer litEnc.
+		e.blk.dictLitEnc = d.litEnc
+		e.hist = append(e.hist, d.content...)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
new file mode 100644
index 0000000000000..4613724e9d141
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -0,0 +1,560 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+ +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = maxMatchLen * 8 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep) & 3) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
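+		// (Table entries hold absolute positions: e.cur plus the index into
+		// e.hist. Rather than clearing both tables when e.cur nears
+		// bufferReset, live entries are rebased so e.cur can restart at
+		// e.maxMatchOff; entries older than one window are zeroed.)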
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 250 + + cv := load6432(src, s) + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + return + } + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if m.rep <= 0 { + // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
+ tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + continue + } + + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) + } + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. 
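+				// (That is: hash the bytes just past the current best match;
+				// if an earlier position was followed by the same bytes, that
+				// position minus the current match length may begin a longer
+				// match covering the same span.)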
+ if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + } + + if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + s = best.s + if best.rep > 0 { + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + addLiterals(&seq, best.s) + + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) + if debugSequences { + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) + } + blk.sequences = append(blk.sequences, seq) + + // Index old s + 1 -> s - 1 + s = best.s + best.length + nextEmit = s + + // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + + switch best.rep { + case 2, 4 | 1: + offset1, offset2 = offset2, offset1 + case 3, 4 | 2: + offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Write our sequence + var seq seq + l := best.length + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 + } + + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + off++ + } + if s >= sLimit { + break encodeLoop + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 0000000000000..a4f5bf91fc63b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1252 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
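+		// (Same rebase as in enc_best.go: entries below minOff are zeroed,
+		// the rest are shifted down so e.cur can restart at e.maxMatchOff.)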
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
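+					// (With zero literals the repeat-offset codes are
+					// reinterpreted by the format, so the backward search
+					// stops one byte short of nextEmit to keep at least one
+					// literal.)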
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
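+				// (candidateL.prev holds the older entry that hashed to the
+				// same slot, since the long table keeps a chain of length 2;
+				// the older candidate is measured too and kept if longer.)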
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. 
+ start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d != nil {
+ panic("betterFastEncoder: Reset with dict")
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff; i < end; i += 4 {
+ const hashLog = betterShortTableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4
+ nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5
+ nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
+ nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
+ e.dictTable[nextHash] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ e.dictTable[nextHash1] = tableEntry{
+ val: uint32(cv >> 8),
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = tableEntry{
+ val: uint32(cv >> 16),
+ offset: i + 2,
+ }
+ e.dictTable[nextHash3] = tableEntry{
+ val: uint32(cv >> 24),
+ offset: i + 3,
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ // Init or copy dict long table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]prevEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ h := hashLen(cv, betterLongTableBits, betterLongLen)
+ e.dictLongTable[h] = prevEntry{
+ offset: e.maxMatchOff,
+ prev: e.dictLongTable[h].offset,
+ }
+
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ off := 8 // First to read
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[off]) << 56)
+ h := hashLen(cv, betterLongTableBits, betterLongLen)
+ e.dictLongTable[h] = prevEntry{
+ offset: i,
+ prev: e.dictLongTable[h].offset,
+ }
+ off++
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ // Reset table to initial state
+ {
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.shortTableShardDirty {
+ if e.shortTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+ const shardCnt = betterShortTableShardCnt
+ const shardSize = betterShortTableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.table[:], e.dictTable)
+ for i := range e.shortTableShardDirty {
+ e.shortTableShardDirty[i] = false
+ }
+ } else {
+ for i := range e.shortTableShardDirty {
+ if !e.shortTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ e.shortTableShardDirty[i] = false
+ }
+ }
+ }
+ {
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.shortTableShardDirty {
+ if e.shortTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+ const shardCnt = betterLongTableShardCnt
+ const shardSize = betterLongTableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.longTable[:], e.dictLongTable)
+ for i := range e.longTableShardDirty {
+ e.longTableShardDirty[i] = false
+ }
+ } else {
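+ // Only the shards written to during encoding are restored here; the
+ // full-copy branch above is taken when more than 4/6 of the shards
+ // are dirty, since one large copy beats many small ones. The
+ // per-shard restore that follows, condensed with hypothetical names:
+ //
+ //	for i, dirty := range dirtyFlags {
+ //		if !dirty {
+ //			continue
+ //		}
+ //		copy(table[i*shardSize:(i+1)*shardSize], snapshot[i*shardSize:(i+1)*shardSize])
+ //		dirtyFlags[i] = false
+ //	}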
+ for i := range e.longTableShardDirty {
+ if !e.longTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
+ e.longTableShardDirty[i] = false
+ }
+ }
+ }
+ e.cur = e.maxMatchOff
+ e.allDirty = false
+}
+
+func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
+ e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
+}
+
+func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
+ e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 0000000000000..a154c18f74161
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,1123 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+ dFastLongTableBits = 17 // Bits used in the long match table
+ dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+ dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dFastLongLen = 8 // Bytes used for table hash
+
+ dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
+ dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
+
+ dFastShortTableBits = tableBits // Bits used in the short match table
+ dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+ dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dFastShortLen = 5 // Bytes used for table hash
+
+)
+
+type doubleFastEncoder struct {
+ fastEncoder
+ longTable [dFastLongTableSize]tableEntry
+}
+
+type doubleFastEncoderDict struct {
+ fastEncoderDict
+ longTable [dFastLongTableSize]tableEntry
+ dictLongTable []tableEntry
+ longTableShardDirty [dLongTableShardCnt]bool
+}
+
+// Encode mimics functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ e.table = [dFastShortTableSize]tableEntry{}
+ e.longTable = [dFastLongTableSize]tableEntry{}
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.longTable[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
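+ // Together with kSearchStrength below, stepSize controls how quickly
+ // the search skips ahead over unmatchable data: each failed probe
+ // advances s by stepSize plus a term that grows with the distance
+ // from the last emit, i.e.
+ //
+ //	s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ //
+ // so after roughly 2^(kSearchStrength-1) match-free bytes the encoder
+ // starts stepping 2 bytes at a time, then 3, and so on.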
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
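+ // In the zstd format an offset value of 1-3 selects from the three
+ // most recent offsets, and the mapping rotates when a sequence has
+ // zero literals; ending backward extension at startLimit keeps at
+ // least one literal, so seq.offset = 1 below always means repeat
+ // offset 0. Roughly, per RFC 8878 (values above 3 encode offset-3):
+ //
+ //	func resolve(val, litLen uint32, reps [3]int32) int32 {
+ //		if val > 3 {
+ //			return int32(val - 3)
+ //		}
+ //		if litLen == 0 { // rotated mapping
+ //			switch val {
+ //			case 1:
+ //				return reps[1]
+ //			case 2:
+ //				return reps[2]
+ //			default:
+ //				return reps[0] - 1
+ //			}
+ //		}
+ //		return reps[val-1] // 1 -> rep0, 2 -> rep1, 3 -> rep2
+ //	}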
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
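+ // Table entries store positions as s+e.cur, and e.cur only grows, so
+ // it must be renormalized before it can overflow int32 (bufferReset).
+ // For a single stored offset v the rebase below amounts to:
+ //
+ //	if v < minOff {
+ //		v = 0 // too old to ever match again (beyond maxMatchOff)
+ //	} else {
+ //		v = v - e.cur + e.maxMatchOff
+ //	}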
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
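+ // For blocks this large most shards get written anyway, so per-shard
+ // dirty tracking stops paying off and the next Reset simply recopies
+ // the whole table. The trade-off, sketched with hypothetical names:
+ //
+ //	dirty := 0
+ //	for _, d := range shardDirty {
+ //		if d {
+ //			dirty++
+ //		}
+ //	}
+ //	fullCopy := dirty > len(shardDirty)*4/6 // one big copy is cheaper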
+ if len(src) > 64<<10 {
+ e.markAllShardsDirty()
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.fastEncoder.Reset(d, singleBlock)
+ if d != nil {
+ panic("doubleFastEncoder: Reset with dict not supported")
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
+ allDirty := e.allDirty
+ e.fastEncoderDict.Reset(d, singleBlock)
+ if d == nil {
+ return
+ }
+
+ // Init or copy dict table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]tableEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
+ val: uint32(cv),
+ offset: e.maxMatchOff,
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
+ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ }
+ }
+ e.lastDictID = d.id
+ allDirty = true
+ }
+ // Reset table to initial state
+ e.cur = e.maxMatchOff
+
+ dirtyShardCnt := 0
+ if !allDirty {
+ for i := range e.longTableShardDirty {
+ if e.longTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+
+ if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
+ //copy(e.longTable[:], e.dictLongTable)
+ e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
+ for i := range e.longTableShardDirty {
+ e.longTableShardDirty[i] = false
+ }
+ return
+ }
+ for i := range e.longTableShardDirty {
+ if !e.longTableShardDirty[i] {
+ continue
+ }
+
+ // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
+ e.longTableShardDirty[i] = false
+ }
+}
+
+func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) {
+ e.longTableShardDirty[entryNum/dLongTableShardSize] = true
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
new file mode 100644
index 0000000000000..f45a3da7dae6a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -0,0 +1,891 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "fmt"
+)
+
+const (
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
+ tableShardSize = tableSize / tableShardCnt // Size of an individual shard
+ tableFastHashLen = 6
+ tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
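+ // The hashLen(cv, hashLog, length) calls in this file hash the low
+ // "length" bytes of a 64-bit load into a hashLog-bit table index via
+ // a multiply-shift, in the spirit of (constants illustrative, not
+ // necessarily the package's):
+ //
+ //	func hash6(u uint64, bits uint8) uint32 {
+ //		const prime6bytes = 227718039650203
+ //		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - bits))
+ //	}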
+ maxMatchLength = 131074
+)
+
+type tableEntry struct {
+ val uint32
+ offset int32
+}
+
+type fastEncoder struct {
+ fastBase
+ table [tableSize]tableEntry
+}
+
+type fastEncoderDict struct {
+ fastEncoder
+ dictTable []tableEntry
+ tableShardDirty [tableShardCnt]bool
+ allDirty bool
+}
+
+// Encode mimics functionality in zstd_fast.c
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches, repeat offsets are
+ // effectively disabled at the start of each block.
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 2 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 0000000000000..72af7ef0fe001 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,619 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
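+//
+// A minimal streaming sketch, assuming only the exported API in this file:
+//
+//	enc, err := NewWriter(dst)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(enc, src); err != nil {
+//		enc.Close()
+//		return err
+//	}
+//	return enc.Close()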
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
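+ // Consume what was buffered from p; a full buffer is compressed below.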
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst := fh.appendTo(tmp[:0]) + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. 
+ enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. 
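+// Each call borrows one block encoder from an internal pool sized by WithEncoderConcurrency.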
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst = fh.appendTo(dst) + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + oldout := blk.output + // Output directly to dst + blk.output = dst + + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = blk.output + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = append(dst, blk.output...) + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + var err error + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. 
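+ // (A block header is 3 bytes, and incompressible blocks are stored
+ // raw, so no block expands by more than its header.)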
+ // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 0000000000000..20671dcb91d93 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,339 @@ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + customBlockSize bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: maxCompressedBlockSize, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: false, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. +func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. 
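+//
+// For example, a sketch of a fully synchronous writer, assuming only
+// options defined in this file:
+//
+//	enc, err := NewWriter(w, WithEncoderConcurrency(1), WithEncoderCRC(false))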
+func WithEncoderConcurrency(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("concurrency must be at least 1")
+ }
+ o.concurrent = n
+ return nil
+ }
+}
+
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level, with a maximum of 8MB.
+func WithWindowSize(n int) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case n < MinWindowSize:
+ return fmt.Errorf("window size must be at least %d", MinWindowSize)
+ case n > MaxWindowSize:
+ return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+ case (n & (n - 1)) != 0:
+ return errors.New("window size must be a power of 2")
+ }
+
+ o.windowSize = n
+ o.customWindow = true
+ if o.blockSize > o.windowSize {
+ o.blockSize = o.windowSize
+ o.customBlockSize = true
+ }
+ return nil
+ }
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB (1<<30 bytes).
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("padding must be at least 1")
+ }
+ // No need to waste our time.
+ if n == 1 {
+ n = 0
+ }
+ if n > 1<<30 {
+ return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+ }
+ o.pad = n
+ return nil
+ }
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+ speedNotSet EncoderLevel = iota
+
+ // SpeedFastest will choose the fastest reasonable compression.
+ // This is roughly equivalent to the fastest Zstandard mode.
+ SpeedFastest
+
+ // SpeedDefault is the default "pretty fast" compression option.
+ // This is roughly equivalent to the default Zstandard mode (level 3).
+ SpeedDefault
+
+ // SpeedBetterCompression will yield better compression than the default.
+ // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+ // Note that the CPU usage of this level may increase further in the future.
+ SpeedBetterCompression
+
+ // SpeedBestCompression will choose the best available compression option.
+ // This will offer the best compression no matter the CPU cost.
+ SpeedBestCompression
+
+ // speedLast should be kept as the last actual compression option.
+ // This is not for external usage, but is used to keep track of the valid options.
+ speedLast
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
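+//
+// For example:
+//
+//	ok, level := EncoderLevelFromString("best") // true, SpeedBestCompression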
+func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 8 << 20 + case SpeedBestCompression: + o.windowSize = 8 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedDefault + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. 
+// This is only a recommendation; each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.single = &b
+ return nil
+ }
+}
+
+// WithLowerEncoderMem will, in some cases, trade less memory usage for
+// slower encoding speed.
+// This will not change the window size, which is the primary knob for reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.lowMem = b
+ return nil
+ }
+}
+
+// WithEncoderDict registers a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+func WithEncoderDict(dict []byte) EOption {
+ return func(o *encoderOptions) error {
+ d, err := loadDict(dict)
+ if err != nil {
+ return err
+ }
+ o.dict = d
+ return nil
+ }
+}
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
+func WithEncoderDictRaw(id uint32, content []byte) EOption {
+ return func(o *encoderOptions) error {
+ if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+ return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+ }
+ o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 0000000000000..53e160f7e5a2f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,413 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "io"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+ o decoderOptions
+ crc *xxhash.Digest
+
+ WindowSize uint64
+
+ // Frame history passed between blocks
+ history history
+
+ rawInput byteBuffer
+
+ // Byte buffer that can be reused for small input blocks.
+ bBuf byteBuf
+
+ FrameContentSize uint64
+
+ DictionaryID uint32
+ HasCheckSum bool
+ SingleSegment bool
+}
+
+const (
+ // MinWindowSize is the minimum Window Size, which is 1 KB.
+ MinWindowSize = 1 << 10
+
+ // MaxWindowSize is the maximum encoder window size
+ // and the default decoder maximum window size.
+ MaxWindowSize = 1 << 29
+)
+
+const (
+ frameMagic = "\x28\xb5\x2f\xfd"
+ skippableFrameMagic = "\x2a\x4d\x18"
+)
+
+func newFrameDec(o decoderOptions) *frameDec {
+ if o.maxWindowSize > o.maxDecodedSize {
+ o.maxWindowSize = o.maxDecodedSize
+ }
+ d := frameDec{
+ o: o,
+ }
+ return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
+func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + case nil: + signature[0] = b[0] + default: + return err + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + case nil: + copy(signature[1:], b) + default: + return err + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. + break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = 0 + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + d.DictionaryID = id + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
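+ // (A 2-byte field stores the value minus 256, covering sizes 256 through 65791.)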
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum, assuming the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC skips over the checksum, assuming the frame has one. +func (d *frameDec) consumeCRC() error { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + } + return err +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. 
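+ // The previous buffer was saved above and is restored before returning.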
+ d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. + crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 0000000000000..667ca06794e2a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) []byte { + dst = append(dst, frameMagic...) 
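+ // Build the Frame_Header_Descriptor byte: bit 2 is the checksum flag,
+ // bit 5 the single-segment flag, the low 2 bits the Dictionary_ID field
+ // size and the top 2 bits the Frame_Content_Size field size.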
+ var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are not stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 0000000000000..2f8860a722b8f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,307 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+			// The check above should make sure we can read 32 bits
+			bitStream = b.Uint32NC() >> (bitCount & 31)
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+			bitStream = b.Uint32() >> (bitCount & 31)
+		}
+	}
+	s.symbolLen = charnum
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return s.buildDtable()
+}
+
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+	fatalErr := func(err error) {
+		if err != nil {
+			panic(err)
+		}
+	}
+	// 	dt             [maxTablesize]decSymbol // Decompression table.
+	// 	symbolLen      uint16                  // Length of active part of the symbol table.
+	// 	actualTableLog uint8                   // Selected tablelog.
+	// 	maxBits        uint8                   // Maximum number of additional bits
+	// 	// used for table creation to avoid allocations.
+	// 	stateTable [256]uint16
+	// 	norm       [maxSymbolValue + 1]int16
+	// 	preDefined bool
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+	return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+	return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+	return uint16(d >> 16)
+}
+
+func (d decSymbol) baselineInt() int {
+	return int(d >> 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+	const mask = 0xffffffffffffff00
+	*d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+	const mask = 0xffffffffffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+	const mask = 0xffffffff0000ffff
+	*d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+	const mask = 0xffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+	if int(symb) >= len(t) {
+		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+	}
+	lu := t[symb]
+	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 0000000000000..d04a829b0a0e7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 0000000000000..bcde398695351 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git 
a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
new file mode 100644
index 0000000000000..8adfebb029798
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -0,0 +1,73 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	symbolNext := s.stateTable[:256]
+
+	// Init, lay down lowprob symbols
+	{
+		for i, v := range s.norm[:s.symbolLen] {
+			if v == -1 {
+				s.dt[highThreshold].setAddBits(uint8(i))
+				highThreshold--
+				v = 1
+			}
+			symbolNext[i] = uint16(v)
+		}
+	}
+
+	// Spread symbols
+	{
+		tableMask := tableSize - 1
+		step := tableStep(tableSize)
+		position := uint32(0)
+		for ss, v := range s.norm[:s.symbolLen] {
+			for i := 0; i < int(v); i++ {
+				s.dt[position].setAddBits(uint8(ss))
+				for {
+					// lowprob area
+					position = (position + step) & tableMask
+					if position <= highThreshold {
+						break
+					}
+				}
+			}
+		}
+		if position != 0 {
+			// position must reach all cells once, otherwise normalizedCounter is incorrect
+			return errors.New("corrupted input (position != 0)")
+		}
+	}
+
+	// Build Decoding table
+	{
+		tableSize := uint16(1 << s.actualTableLog)
+		for u, v := range s.dt[:tableSize] {
+			symbol := v.addBits()
+			nextState := symbolNext[symbol]
+			symbolNext[symbol] = nextState + 1
+			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+			s.dt[u&maxTableMask].setNBits(nBits)
+			newState := (nextState << nBits) - tableSize
+			if newState > tableSize {
+				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+			}
+			if newState == uint16(u) && nBits == 0 {
+				// A state that transitions to itself while reading no bits would never advance.
+				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+			}
+			s.dt[u&maxTableMask].setNewState(newState)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
new file mode 100644
index 0000000000000..ab26326a8ff80
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -0,0 +1,701 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+)
+
+const (
+	// For encoding we only support tables up to maxEncTableLog bits.
+	maxEncTableLog    = 8
+	maxEncTablesize   = 1 << maxTableLog
+	maxEncTableMask   = (1 << maxTableLog) - 1
+	minEncTablelog    = 5
+	maxEncSymbolValue = maxMatchLengthSymbol
+)
+
+// fseEncoder provides temporary storage for compression.
+type fseEncoder struct {
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	ct             cTable // Compression tables.
+	maxCount       int    // count of the most probable symbol
+	zeroBits       bool   // set if a symbol has prob > 50%, allowing zero-bit encodes.
+	clearCount     bool   // clear count
+	useRLE         bool   // This encoder is for RLE
+	preDefined     bool   // This encoder is predefined.
+	reUsed         bool   // Set to know when the encoder has been reused.
+	rleVal         uint8  // RLE Symbol
+	maxBits        uint8  // Maximum output bits after transform.
+
+	// TODO: Technically zstd should be fine with 64 bytes.
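+	// count and norm cover the full byte alphabet, although zstd
+	// symbols only go up to maxEncSymbolValue (52).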
+	count [256]uint32
+	norm  [256]int16
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaNbBits    uint32
+	deltaFindState int16
+	outBits        uint8
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram returns the histogram, which can be populated directly to skip
+// that step in the compression. It also allows inspecting the histogram
+// after compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+func (s *fseEncoder) Histogram() *[256]uint32 {
+	return &s.count
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [256]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+	tableLog := uint8(maxEncTableLog)
+	minBitsSrc := highBit(uint32(length)) + 1
+	minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+	minBits := uint8(minBitsSymbols)
+	if minBitsSrc < minBitsSymbols {
+		minBits = uint8(minBitsSrc)
+	}
+
+	maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minEncTablelog {
+		tableLog = minEncTablelog
+	}
+	if tableLog > maxEncTableLog {
+		tableLog = maxEncTableLog
+	}
+	s.actualTableLog = tableLog
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.norm[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	if s.preDefined {
+		// Never write predefined.
+		return out, nil
+	}
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		// maximum header size plus 2 extra bytes for final output if bitCount == 0.
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return nil, errors.New("internal error: remaining < 1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	if outP+2 > len(out) {
+		return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+	}
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += int((bitCount + 7) / 8)
+
+	if charnum > s.symbolLen {
+		return nil, errors.New("internal error: charnum > s.symbolLen")
+	}
+	return out[:outP], nil
+}
+
+// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+// note 1 : assume symbolValue is valid (<= maxSymbolValue)
+// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits
+func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
+	minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
+	threshold := (minNbBits + 1) << 16
+	if debugAsserts {
+		if !(s.actualTableLog < 16) {
+			panic("!s.actualTableLog < 16")
+		}
+		// ensure enough room for renormalization double shift
+		if !(uint8(accuracyLog) < 31-s.actualTableLog) {
+			panic("!uint8(accuracyLog) < 31-s.actualTableLog")
+		}
+	}
+	tableSize := uint32(1) << s.actualTableLog
+	deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
+	// linear interpolation (very approximate)
+	normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
+	bitMultiplier := uint32(1) << accuracyLog
+	if debugAsserts {
+		if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
+			panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
+		}
+		if normalizedDeltaFromThreshold > bitMultiplier {
+			panic("normalizedDeltaFromThreshold > bitMultiplier")
+		}
+	}
+	return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
+}
+
+// Returns the cost in bits of encoding the distribution in count using ctable.
+// Histogram should only be up to the last non-zero symbol.
+// Returns math.MaxUint32 if ctable cannot represent all the symbols in count.
+func (s *fseEncoder) approxSize(hist []uint32) uint32 {
+	if int(s.symbolLen) < len(hist) {
+		// More symbols than we have.
+		return math.MaxUint32
+	}
+	if s.useRLE {
+		// We will never reuse RLE encoders.
+		return math.MaxUint32
+	}
+	const kAccuracyLog = 8
+	badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
+	var cost uint32
+	for i, v := range hist {
+		if v == 0 {
+			continue
+		}
+		if s.norm[i] == 0 {
+			return math.MaxUint32
+		}
+		bitCost := s.bitCost(uint8(i), kAccuracyLog)
+		if bitCost > badCost {
+			return math.MaxUint32
+		}
+		cost += v * bitCost
+	}
+	return cost >> kAccuracyLog
+}
+
+// maxHeaderSize returns the maximum header size in bits.
+// This is not exact size, but we want a penalty for new tables anyway.
+func (s *fseEncoder) maxHeaderSize() uint32 {
+	if s.preDefined {
+		return 0
+	}
+	if s.useRLE {
+		return 8
+	}
+	return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
+}
+
+// cState contains the compression state of a stream.
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 0000000000000..474cb77d2b999 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 0000000000000..5d73c21ebdd4b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. 
All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+const (
+	prime3bytes = 506832829
+	prime4bytes = 2654435761
+	prime5bytes = 889523592379
+	prime6bytes = 227718039650203
+	prime7bytes = 58295818150454627
+	prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+	switch mls {
+	case 3:
+		return (uint32(u<<8) * prime3bytes) >> (32 - length)
+	case 5:
+		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+	case 6:
+		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+	case 7:
+		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+	case 8:
+		return uint32((u * prime8bytes) >> (64 - length))
+	default:
+		return (uint32(u) * prime4bytes) >> (32 - length)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
new file mode 100644
index 0000000000000..09164856d2224
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -0,0 +1,116 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"github.com/klauspost/compress/huff0"
+)
+
+// history contains the information transferred between blocks.
+type history struct {
+	// Literal decompression
+	huffTree *huff0.Scratch
+
+	// Sequence decompression
+	decoders      sequenceDecs
+	recentOffsets [3]int
+
+	// History buffer...
+	b []byte
+
+	// ignoreBuffer is meant to ignore a number of bytes
+	// when checking for matches in history
+	ignoreBuffer int
+
+	windowSize       int
+	allocFrameBuffer int // needed?
+	error            bool
+	dict             *dict
+}
+
+// reset will reset the history to initial state of a frame.
+// The history must already have been initialized to the desired size.
+func (h *history) reset() {
+	h.b = h.b[:0]
+	h.ignoreBuffer = 0
+	h.error = false
+	h.recentOffsets = [3]int{1, 4, 8}
+	h.decoders.freeDecoders()
+	h.decoders = sequenceDecs{br: h.decoders.br}
+	h.freeHuffDecoder()
+	h.huffTree = nil
+	h.dict = nil
+	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) freeHuffDecoder() {
+	if h.huffTree != nil {
+		if h.dict == nil || h.dict.litEnc != h.huffTree {
+			huffDecoderPool.Put(h.huffTree)
+			h.huffTree = nil
+		}
+	}
+}
+
+func (h *history) setDict(dict *dict) {
+	if dict == nil {
+		return
+	}
+	h.dict = dict
+	h.decoders.litLengths = dict.llDec
+	h.decoders.offsets = dict.ofDec
+	h.decoders.matchLengths = dict.mlDec
+	h.decoders.dict = dict.content
+	h.recentOffsets = dict.offsets
+	h.huffTree = dict.litEnc
+}
+
+// append bytes to history.
+// This function will make sure there is space for it,
+// if the buffer has been allocated with enough extra space.
+func (h *history) append(b []byte) {
+	if len(b) >= h.windowSize {
+		// Discard all history by simply overwriting
+		h.b = h.b[:h.windowSize]
+		copy(h.b, b[len(b)-h.windowSize:])
+		return
+	}
+
+	// If there is space, append it.
+	if len(b) < cap(h.b)-len(h.b) {
+		h.b = append(h.b, b...)
+		return
+	}
+
+	// Move data down so we only have window size left.
+ // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 0000000000000..24b53065f40b5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 0000000000000..777290d44ced7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. 
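+
+A minimal usage sketch (illustrative only; the import path and the sample
+input are assumptions, not part of this vendored copy):
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+func main() {
+	d := xxhash.New()
+	d.WriteString("hello, ")
+	d.Write([]byte("world"))
+	// Streaming the bytes gives the same digest as hashing them at once.
+	fmt.Printf("%016x\n", d.Sum64())
+}
+```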
+ +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 0000000000000..fc40c820016ca --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
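+		// memleft has 32-d.n free bytes and d.n+len(b) >= 32 here, so the
+		// copy below completes a full 32-byte block before it is consumed.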
+ c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 0000000000000..ddb63aa91b14d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
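+	// Matches Digest.Reset: v1 = prime1 + prime2, v2 = prime2,
+	// v3 = 0, v4 = -prime1.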
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 0000000000000..ae7d4d3295a4a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(s *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD s+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 0000000000000..d4221edf4fd61 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
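+// On amd64 and arm64 it is implemented in assembly (xxhash_amd64.s and
+// xxhash_arm64.s); xxhash_other.go provides the portable Go fallback.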
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 0000000000000..0be16cefc7f4a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 0000000000000..6f3b0cb10264c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 0000000000000..f41932b7a4f75 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 0000000000000..0782b86e3d15d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,66 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SHRL $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 0000000000000..57b9c31c027d3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 0000000000000..d7fe6d82d93a5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,503 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. 
+	llCode, mlCode, ofCode uint8
+}
+
+type seqVals struct {
+	ll, ml, mo int
+}
+
+func (s seq) String() string {
+	if s.offset <= 3 {
+		if s.offset == 0 {
+			return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
+		}
+		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
+	}
+	return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
+}
+
+type seqCompMode uint8
+
+const (
+	compModePredefined seqCompMode = iota
+	compModeRLE
+	compModeFSE
+	compModeRepeat
+)
+
+type sequenceDec struct {
+	// decoder keeps track of the current state and updates it from the bitstream.
+	fse    *fseDecoder
+	state  fseState
+	repeat bool
+}
+
+// init the state of the decoder with input from stream.
+func (s *sequenceDec) init(br *bitReader) error {
+	if s.fse == nil {
+		return errors.New("sequence decoder not defined")
+	}
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// sequenceDecs contains all 3 decoders and their state.
+type sequenceDecs struct {
+	litLengths   sequenceDec
+	offsets      sequenceDec
+	matchLengths sequenceDec
+	prevOffset   [3]int
+	dict         []byte
+	literals     []byte
+	out          []byte
+	nSeqs        int
+	br           *bitReader
+	seqSize      int
+	windowSize   int
+	maxBits      uint8
+	maxSyncLen   uint64
+}
+
+// execute will execute the decoded sequences with the provided history.
+// The sequences must be evaluated before being sent.
+func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize > cap(s.out) {
+		addBytes := s.seqSize + len(s.out)
+		s.out = append(s.out, make([]byte, addBytes)...)
+		s.out = s.out[:len(s.out)-addBytes]
+	}
+
+	if debugDecoder {
+		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
+	}
+
+	var t = len(s.out)
+	out := s.out[:t+s.seqSize]
+
+	for _, seq := range seqs {
+		// Add literals
+		copy(out[t:], s.literals[:seq.ll])
+		t += seq.ll
+		s.literals = s.literals[seq.ll:]
+
+		// Copy from dictionary...
+		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+			}
+
+			// we may be in dictionary.
+			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
+			}
+			end := dictO + seq.ml
+			if end > len(s.dict) {
+				n := len(s.dict) - dictO
+				copy(out[t:], s.dict[dictO:])
+				t += n
+				seq.ml -= n
+			} else {
+				copy(out[t:], s.dict[dictO:end])
+				t += end - dictO
+				continue
+			}
+		}
+
+		// Copy from history.
+		if v := seq.mo - t; v > 0 {
+			// v is the start position in history from end.
+			start := len(hist) - v
+			if seq.ml > v {
+				// Some goes into the current block.
+				// Copy remainder of history
+				copy(out[t:], hist[start:])
+				t += v
+				seq.ml -= v
+			} else {
+				copy(out[t:], hist[start:start+seq.ml])
+				t += seq.ml
+				continue
+			}
+		}
+		// We must be in current buffer now
+		if seq.ml > 0 {
+			start := t - seq.mo
+			if seq.ml <= t-start {
+				// No overlap
+				copy(out[t:], out[start:start+seq.ml])
+				t += seq.ml
+				continue
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+				src := out[start : start+seq.ml]
+				dst := out[t:]
+				dst = dst[:len(src)]
+				t += len(src)
+				// Destination is the space we just added.
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+	}
+
+	// Add final literals
+	copy(out[t:], s.literals)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+		}
+	}
+	s.out = out
+
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
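+// decodeSync first tries the specialized fast path (decodeSyncSimple) and
+// only falls back to the generic Go loop below when that path reports the
+// input as unsupported.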
+func (s *sequenceDecs) decodeSync(hist []byte) error {
+	supported, err := s.decodeSyncSimple(hist)
+	if supported {
+		return err
+	}
+
+	br := s.br
+	seqs := s.nSeqs
+	startSize := len(s.out)
+	// Grab the full-size tables, to avoid bounds checks.
+	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+	out := s.out
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	if debugDecoder {
+		println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
+	}
+	for i := seqs - 1; i >= 0; i-- {
+		if br.overread() {
+			printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
+			return io.ErrUnexpectedEOF
+		}
+		var ll, mo, ml int
+		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+			// inlined function:
+			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+			// Final will not read from stream.
+			var llB, mlB, moB uint8
+			ll, llB = llState.final()
+			ml, mlB = mlState.final()
+			mo, moB = ofState.final()
+
+			// extra bits are stored in reverse order.
+			br.fillFast()
+			mo += br.getBits(moB)
+			if s.maxBits > 32 {
+				br.fillFast()
+			}
+			ml += br.getBits(mlB)
+			ll += br.getBits(llB)
+
+			if moB > 1 {
+				s.prevOffset[2] = s.prevOffset[1]
+				s.prevOffset[1] = s.prevOffset[0]
+				s.prevOffset[0] = mo
+			} else {
+				// mo = s.adjustOffset(mo, ll, moB)
+				// Inlined for rather big speedup
+				if ll == 0 {
+					// There is an exception though, when current sequence's literals_length = 0.
+					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+					mo++
+				}
+
+				if mo == 0 {
+					mo = s.prevOffset[0]
+				} else {
+					var temp int
+					if mo == 3 {
+						temp = s.prevOffset[0] - 1
+					} else {
+						temp = s.prevOffset[mo]
+					}
+
+					if temp == 0 {
+						// 0 is not valid; input is corrupted; force offset to 1
+						println("WARNING: temp was 0")
+						temp = 1
+					}
+
+					if mo != 1 {
+						s.prevOffset[2] = s.prevOffset[1]
+					}
+					s.prevOffset[1] = s.prevOffset[0]
+					s.prevOffset[0] = temp
+					mo = temp
+				}
+			}
+			br.fillFast()
+		} else {
+			ll, mo, ml = s.next(br, llState, mlState, ofState)
+			br.fill()
+		}
+
+		if debugSequences {
+			println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+		}
+
+		if ll > len(s.literals) {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
+		}
+		size := ll + ml + len(out)
+		if size-startSize > maxBlockSize {
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+		}
+		if size > cap(out) {
+			// Not enough capacity, which can happen under high-volume block
+			// streaming conditions, but also when the destination slice is too
+			// small for sync operations. Over-allocating here can create a large
+			// amount of GC pressure, so we try to keep it as contained as possible.
+			used := len(out) - startSize
+			addBytes := 256 + ll + ml + used>>2
+			// Clamp to max block size.
+			if used+addBytes > maxBlockSize {
+				addBytes = maxBlockSize - used
+			}
+			out = append(out, make([]byte, addBytes)...)
+			out = out[:len(out)-addBytes]
+		}
+		if ml > maxMatchLen {
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+		}
+
+		// Add literals
+		out = append(out, s.literals[:ll]...)
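+		// Consume the emitted literals from the head of s.literals.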
+		s.literals = s.literals[ll:]
+
+		if mo == 0 && ml > 0 {
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+		}
+
+		if mo > len(out)+len(hist) || mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
+			}
+
+			// we may be in dictionary.
+			dictO := len(s.dict) - (mo - (len(out) + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
+			}
+			end := dictO + ml
+			if end > len(s.dict) {
+				out = append(out, s.dict[dictO:]...)
+				ml -= len(s.dict) - dictO
+			} else {
+				out = append(out, s.dict[dictO:end]...)
+				mo = 0
+				ml = 0
+			}
+		}
+
+		// Copy from history.
+		// TODO: Blocks without history could be made to ignore this completely.
+		if v := mo - len(out); v > 0 {
+			// v is the start position in history from end.
+			start := len(hist) - v
+			if ml > v {
+				// Some goes into the current block.
+				// Copy remainder of history
+				out = append(out, hist[start:]...)
+				ml -= v
+			} else {
+				out = append(out, hist[start:start+ml]...)
+				ml = 0
+			}
+		}
+		// We must be in current buffer now
+		if ml > 0 {
+			start := len(out) - mo
+			if ml <= len(out)-start {
+				// No overlap
+				out = append(out, out[start:start+ml]...)
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+				out = out[:len(out)+ml]
+				src := out[start : start+ml]
+				// Destination is the space we just added.
+				dst := out[len(out)-ml:]
+				dst = dst[:len(src)]
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+		if i == 0 {
+			// This is the last sequence, so we shouldn't update state.
+			break
+		}
+
+		// Manually inlined, ~ 5-20% faster
+		// Update all 3 states at once. Approx 20% faster.
+		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+		if nBits == 0 {
+			llState = llTable[llState.newState()&maxTableMask]
+			mlState = mlTable[mlState.newState()&maxTableMask]
+			ofState = ofTable[ofState.newState()&maxTableMask]
+		} else {
+			bits := br.get32BitsFast(nBits)
+
+			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits >> (ofState.nbBits() & 31))
+			lowBits &= bitMask[mlState.nbBits()&15]
+			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+		}
+	}
+
+	if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+
+	// Add final literals
+	s.out = append(out, s.literals...)
+	return br.close()
+}
+
+var bitMask [16]uint16
+
+func init() {
+	for i := range bitMask[:] {
+		bitMask[i] = uint16((1 << uint(i)) - 1)
+	}
+}
+
+func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
+	// Final will not read from stream.
+	ll, llB := llState.final()
+	ml, mlB := mlState.final()
+	mo, moB := ofState.final()
+
+	// extra bits are stored in reverse order.
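+	// That is: the offset bits are read first, then match length, then
+	// literal length.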
+	br.fill()
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
+		br.fill()
+	}
+	// matchlength+literal length, max 32 bits
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
+	mo = s.adjustOffset(mo, ll, moB)
+	return
+}
+
+func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
+	if offsetB > 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+		s.prevOffset[1] = s.prevOffset[0]
+		s.prevOffset[0] = offset
+		return offset
+	}
+
+	if litLen == 0 {
+		// There is an exception though, when current sequence's literals_length = 0.
+		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+		offset++
+	}
+
+	if offset == 0 {
+		return s.prevOffset[0]
+	}
+	var temp int
+	if offset == 3 {
+		temp = s.prevOffset[0] - 1
+	} else {
+		temp = s.prevOffset[offset]
+	}
+
+	if temp == 0 {
+		// 0 is not valid; input is corrupted; force offset to 1
+		println("temp was 0")
+		temp = 1
+	}
+
+	if offset != 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+	}
+	s.prevOffset[1] = s.prevOffset[0]
+	s.prevOffset[0] = temp
+	return temp
+} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 0000000000000..8adabd8287776 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,394 @@ +//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/internal/cpuinfo"
+)
+
+type decodeSyncAsmContext struct {
+	llTable     []decSymbol
+	mlTable     []decSymbol
+	ofTable     []decSymbol
+	llState     uint64
+	mlState     uint64
+	ofState     uint64
+	iteration   int
+	litRemain   int
+	out         []byte
+	outPosition int
+	literals    []byte
+	litPosition int
+	history     []byte
+	windowSize  int
+	ll          int // set on error (not for all errors, please refer to _generate/gen.go)
+	ml          int // set on error (not for all errors, please refer to _generate/gen.go)
+	mo          int // set on error (not for all errors, please refer to _generate/gen.go)
+}
+
+// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write beyond the output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write beyond the output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// decode sequences from the stream with the provided history but without a dictionary.
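+// It reports supported == false when the input cannot be handled by the
+// assembly routines, in which case the caller runs the generic loop in
+// decodeSync instead.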
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+	if len(s.dict) > 0 {
+		return false, nil
+	}
+	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
+		return false, nil
+	}
+
+	// FIXME: Using unsafe memory copies leads to rare, random crashes
+	// with fuzz testing. It is therefore disabled for now.
+	const useSafe = true
+	/*
+		useSafe := false
+		if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+			useSafe = true
+		}
+		if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+			useSafe = true
+		}
+		if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+			useSafe = true
+		}
+	*/
+
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeSyncAsmContext{
+		llTable:     s.litLengths.fse.dt[:maxTablesize],
+		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:     s.offsets.fse.dt[:maxTablesize],
+		llState:     uint64(s.litLengths.state.state),
+		mlState:     uint64(s.matchLengths.state.state),
+		ofState:     uint64(s.offsets.state.state),
+		iteration:   s.nSeqs - 1,
+		litRemain:   len(s.literals),
+		out:         s.out,
+		outPosition: len(s.out),
+		literals:    s.literals,
+		windowSize:  s.windowSize,
+		history:     hist,
+	}
+
+	s.seqSize = 0
+	startSize := len(s.out)
+
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+		}
+	} else {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+		}
+	}
+	switch errCode {
+	case noError:
+		break
+
+	case errorMatchLenOfsMismatch:
+		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+	case errorMatchLenTooBig:
+		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+	case errorMatchOffTooBig:
+		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+			ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+	case errorNotEnoughLiterals:
+		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+			ctx.ll, ctx.litRemain+ctx.ll)
+
+	case errorOverread:
+		return true, io.ErrUnexpectedEOF
+
+	case errorNotEnoughSpace:
+		size := ctx.outPosition + ctx.ll + ctx.ml
+		if debugDecoder {
+			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+		}
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
+	default:
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+		return true, err
+	}
+
+	s.literals = s.literals[ctx.litPosition:]
+	t := ctx.outPosition
+	s.out = s.out[:t]
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
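+	// In debug builds, cross-check the produced length against the output.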
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// error reported when bits are overread.
+const errorOverread = 6
+
+// sequenceDecs_decode_amd64 implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_56_amd64 implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_56_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
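+// The decoded sequences are written to seqs; they are applied to the output
+// separately by execute/executeSimple.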
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeAsmContext{
+		llTable:   s.litLengths.fse.dt[:maxTablesize],
+		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:   s.offsets.fse.dt[:maxTablesize],
+		llState:   uint64(s.litLengths.state.state),
+		mlState:   uint64(s.matchLengths.state.state),
+		ofState:   uint64(s.offsets.state.state),
+		seqs:      seqs,
+		iteration: len(seqs) - 1,
+		litRemain: len(s.literals),
+	}
+
+	if debugDecoder {
+		println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
+	}
+
+	s.seqSize = 0
+	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+		}
+	} else {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+		}
+	}
+	if errCode != 0 {
+		i := len(seqs) - ctx.iteration - 1
+		switch errCode {
+		case errorMatchLenOfsMismatch:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+		case errorMatchLenTooBig:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+		case errorNotEnoughLiterals:
+			ll := ctx.seqs[i].ll
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+		case errorOverread:
+			return io.ErrUnexpectedEOF
+		}
+
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
+	}
+
+	if ctx.litRemain < 0 {
+		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+			len(s.literals), len(s.literals)-ctx.litRemain)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+	if debugDecoder {
+		println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+	seqs        []seqVals
+	seqIndex    int
+	out         []byte
+	history     []byte
+	literals    []byte
+	outPosition int
+	litPosition int
+	windowSize  int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// Same as above, but with safe memcopies
+//
+//go:noescape
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when a dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+		s.out = append(s.out, make([]byte, addBytes)...)
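+		// The append above only grows capacity; the length is trimmed back below.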
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 0000000000000..5b06174b89819 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4151 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
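+	// The fast path refills 8 bytes at once when at least 8 input bytes
+	// remain; otherwise the bit buffer is topped up byte by byte.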
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + 
MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 
152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + 
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + 
+sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
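+	// Copy 8-16 bytes as two possibly-overlapping 8-byte loads and stores:
+	// one anchored at the first byte, one anchored at the last.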
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB 
R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 
+ JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + 
MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + 
ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 
32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal 
Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + 
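+	// History copy finished: advance the output position by the match length
+	// and move on to the next sequence.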
+copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 
24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + 
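+	// Length-specialized tails for 1-16 byte literal copies; unlike the fast
+	// 16-byte MOVUPS path, these never store past the requested length.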
+copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 
+ JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ -8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 
2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error 
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 0000000000000..2fb35b788c149 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
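+					//
+					// Worked example (illustrative, not from the upstream source):
+					// with s.prevOffset = {8, 4, 2} and ll == 0, a raw mo of 0
+					// becomes 1 below and selects prevOffset[1] = 4, while a raw
+					// mo of 2 becomes 3 and yields prevOffset[0]-1 = 7.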
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
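+				// Illustrative numbers: with t = 2, seq.mo = 6 and seq.ml = 7,
+				// v = 4 bytes come from the tail of hist here, and the
+				// remaining 3 bytes are copied from the current buffer below.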
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 0000000000000..8014174a7713e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
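+// For example (illustrative): litLength 100 is above 63, so the code is
+// highBit(100) + llDeltaCode = 6 + 19 = 25, which llBitsTable maps to 6
+// extra bits.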
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 0000000000000..ec13594e89b0e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,434 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
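+	// (It is returned by snappyDecodedLen on 32-bit platforms, where a
+	// decoded length above 2^31-1 cannot be represented as an int.)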
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + var n int + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
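+			// Chunk layout: a 4-byte little-endian masked CRC of the
+			// uncompressed data, then the Snappy-compressed payload whose
+			// varint length header is read by snappyDecodedLen below. Note
+			// the converter does not verify the CRC for compressed chunks
+			// (see the commented-out checksum).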
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
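+			// The framing format requires decoders to reject these, unlike
+			// the skippable chunks (0x80-0xfd) and padding (0xfe) handled
+			// below, which are simply read and discarded.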
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
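+		// Offsets in this encoder's sequences are stored as offset+3,
+		// since values 1-3 select the recent-offset history; the disabled
+		// matchOffset path below would map repeated offsets to those values.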
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 0000000000000..29c15c8c4efe5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
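+			// NewReader fails only on invalid options, so with the lowmem,
+			// max-window and concurrency settings forced above, a failure
+			// here indicates conflicting caller-supplied options.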
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 0000000000000..4be7cc73671b3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,121 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) 
+ } +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/go.etcd.io/etcd/raft/v3/LICENSE b/vendor/github.com/kylelemons/godebug/LICENSE similarity index 100% rename from vendor/go.etcd.io/etcd/raft/v3/LICENSE rename to vendor/github.com/kylelemons/godebug/LICENSE diff --git a/vendor/github.com/kylelemons/godebug/diff/diff.go b/vendor/github.com/kylelemons/godebug/diff/diff.go new file mode 100644 index 0000000000000..200e596c6259b --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/diff/diff.go @@ -0,0 +1,186 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package diff implements a linewise diff algorithm. +package diff + +import ( + "bytes" + "fmt" + "strings" +) + +// Chunk represents a piece of the diff. A chunk will not have both added and +// deleted lines. Equal lines are always after any added or deleted lines. +// A Chunk may or may not have any lines in it, especially for the first or last +// chunk in a computation. +type Chunk struct { + Added []string + Deleted []string + Equal []string +} + +func (c *Chunk) empty() bool { + return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0 +} + +// Diff returns a string containing a line-by-line unified diff of the linewise +// changes required to make A into B. Each line is prefixed with '+', '-', or +// ' ' to indicate if it should be added, removed, or is correct respectively. +func Diff(A, B string) string { + aLines := strings.Split(A, "\n") + bLines := strings.Split(B, "\n") + + chunks := DiffChunks(aLines, bLines) + + buf := new(bytes.Buffer) + for _, c := range chunks { + for _, line := range c.Added { + fmt.Fprintf(buf, "+%s\n", line) + } + for _, line := range c.Deleted { + fmt.Fprintf(buf, "-%s\n", line) + } + for _, line := range c.Equal { + fmt.Fprintf(buf, " %s\n", line) + } + } + return strings.TrimRight(buf.String(), "\n") +} + +// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm +// to compute the edits required from A to B and returns the +// edit chunks. +func DiffChunks(a, b []string) []Chunk { + // algorithm: http://www.xmailserver.org/diff2.pdf + + // We'll need these quantities a lot. + alen, blen := len(a), len(b) // M, N + + // At most, it will require len(a) deletions and len(b) additions + // to transform a into b. + maxPath := alen + blen // MAX + if maxPath == 0 { + // degenerate case: two empty lists are the same + return nil + } + + // Store the endpoint of the path for diagonals. + // We store only the a index, because the b index on any diagonal + // (which we know during the loop below) is aidx-diag. + // endpoint[maxPath] represents the 0 diagonal. 
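+	//
+	// Tiny example (illustrative): diffing a = [x y] against b = [x z],
+	// maxPath is 4, and matching "x" on the 0 diagonal advances its
+	// endpoint, stored at endpoint[4], to aidx 1.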
+ // + // Stated differently: + // endpoint[d] contains the aidx of a furthest reaching path in diagonal d + endpoint := make([]int, 2*maxPath+1) // V + + saved := make([][]int, 0, 8) // Vs + save := func() { + dup := make([]int, len(endpoint)) + copy(dup, endpoint) + saved = append(saved, dup) + } + + var editDistance int // D +dLoop: + for editDistance = 0; editDistance <= maxPath; editDistance++ { + // The 0 diag(onal) represents equality of a and b. Each diagonal to + // the left is numbered one lower, to the right is one higher, from + // -alen to +blen. Negative diagonals favor differences from a, + // positive diagonals favor differences from b. The edit distance to a + // diagonal d cannot be shorter than d itself. + // + // The iterations of this loop cover either odds or evens, but not both, + // If odd indices are inputs, even indices are outputs and vice versa. + for diag := -editDistance; diag <= editDistance; diag += 2 { // k + var aidx int // x + switch { + case diag == -editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath-editDistance+1] + 0 + case diag == editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath+editDistance-1] + 1 + case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]: + // diagonal d+1 was farther along, so use that + aidx = endpoint[maxPath+diag+1] + 0 + default: + // diagonal d-1 was farther (or the same), so use that + aidx = endpoint[maxPath+diag-1] + 1 + } + // On diagonal d, we can compute bidx from aidx. + bidx := aidx - diag // y + // See how far we can go on this diagonal before we find a difference. + for aidx < alen && bidx < blen && a[aidx] == b[bidx] { + aidx++ + bidx++ + } + // Store the end of the current edit chain. + endpoint[maxPath+diag] = aidx + // If we've found the end of both inputs, we're done! + if aidx >= alen && bidx >= blen { + save() // save the final path + break dLoop + } + } + save() // save the current path + } + if editDistance == 0 { + return nil + } + chunks := make([]Chunk, editDistance+1) + + x, y := alen, blen + for d := editDistance; d > 0; d-- { + endpoint := saved[d] + diag := x - y + insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1]) + + x1 := endpoint[maxPath+diag] + var x0, xM, kk int + if insert { + kk = diag + 1 + x0 = endpoint[maxPath+kk] + xM = x0 + } else { + kk = diag - 1 + x0 = endpoint[maxPath+kk] + xM = x0 + 1 + } + y0 := x0 - kk + + var c Chunk + if insert { + c.Added = b[y0:][:1] + } else { + c.Deleted = a[x0:][:1] + } + if xM < x1 { + c.Equal = a[xM:][:x1-xM] + } + + x, y = x0, y0 + chunks[d] = c + } + if x > 0 { + chunks[0].Equal = a[:x] + } + if chunks[0].empty() { + chunks = chunks[1:] + } + if len(chunks) == 0 { + return nil + } + return chunks +} diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30ee9a6..b9cc55abbb0ac 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. 
Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 0000000000000..65d761bc9f28c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go new file mode 100644 index 0000000000000..8547c8dfd1871 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// AcceptSpec describes an Accept* header. 
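+// For example (illustrative), an Accept-Encoding value of
+// "gzip;q=0.8, identity" parses to []AcceptSpec{{"gzip", 0.8}, {"identity", 1}}.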
+type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. +func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 0000000000000..2e45780b74b94 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index bcfa4fa10e014..cc4ef1077e8cf 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -37,6 +37,9 @@ var ( // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. // e.g. 
go_sched_goroutines_goroutines MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} + // MetricsDebug allows only debug metrics to be collected from Go runtime. + // e.g. go_godebug_non_default_behavior_gocachetest_events_total + MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)} ) // WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: @@ -44,7 +47,6 @@ var ( // go_memstats_alloc_bytes // go_memstats_alloc_bytes_total // go_memstats_sys_bytes -// go_memstats_lookups_total // go_memstats_mallocs_total // go_memstats_frees_total // go_memstats_heap_alloc_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5e0d42..520cbd7d418f3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. 
Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64f4305..5117464172912 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s", desc, origName) +} + // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. 
- panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb395aa5..519db348a747c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -440,7 +440,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +473,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +743,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). - resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. 
func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -821,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error { Length: proto.Uint32(0), }} } + + if h.nativeExemplars.isEnabled() { + h.nativeExemplars.Lock() + his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) + h.nativeExemplars.Unlock() + } + } addAndResetCounts(hotCounts, coldCounts) return nil @@ -1091,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) { deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. +// If the histogram is a native histogram, the exemplar is also cached in nativeExemplars, +// which enforces a limit and evicts one exemplar when that limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -1102,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -1336,6 +1371,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type buckSort []*dto.Bucket func (s buckSort) Len() int { @@ -1575,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) } + +type nativeExemplars struct { + sync.Mutex + + // Time-to-live for exemplars; set to -1 if exemplars are disabled (that is, NativeHistogramMaxExemplars is below 0). + // The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
+ ttl time.Duration + + exemplars []*dto.Exemplar +} + +func (n *nativeExemplars) isEnabled() bool { + return n.ttl != -1 +} + +func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { + if ttl == 0 { + ttl = 5 * time.Minute + } + + if maxCount == 0 { + maxCount = 10 + } + + if maxCount < 0 { + maxCount = 0 + ttl = -1 + } + + return nativeExemplars{ + ttl: ttl, + exemplars: make([]*dto.Exemplar, 0, maxCount), + } +} + +func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { + if !n.isEnabled() { + return + } + + n.Lock() + defer n.Unlock() + + // While the number of exemplars is still below cap(n.exemplars), + // insert the new exemplar directly, keeping the slice sorted by value. + if len(n.exemplars) < cap(n.exemplars) { + var nIdx int + for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { + if *e.Value < *n.exemplars[nIdx].Value { + break + } + } + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...) + return + } + + if len(n.exemplars) == 1 { + // When only a single exemplar is kept, + // replace the existing exemplar with the new one. + n.exemplars[0] = e + return + } + // From this point on, the number of exemplars is greater than 1. + + // When the number of exemplars exceeds the limit, remove one exemplar. + var ( + ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop. + otIdx = -1 // Index of the exemplar with the oldest timestamp. + + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. + + // The insertion point of the new exemplar in the exemplars slice after insertion. + // This is calculated purely based on the order of the exemplars by value. + // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end. + nIdx = -1 + + // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar. + // The aim is to keep a good spread of exemplars by value and not let them bunch up too much. + // It is calculated in 3 steps: + // 1. First we set rIdx to the index of the older exemplar within the closest pair by value. + // That is the following will be true (on log scale): + // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have + // the closest values to each other from all pairs. + // For example, suppose the values are distributed like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // Or like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // 2. If there is an exemplar that expired, then we simply reset rIdx to that index. + // 3. We check if by inserting the new exemplar we would create a closer pair at + // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to + // keep the spread of exemplars by value; otherwise we keep rIdx as it is. + rIdx = -1 + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. + ) + + for i, exemplar := range n.exemplars { + // Find the exemplar with the oldest timestamp. + if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) { + ot = exemplar.Timestamp.AsTime() + otIdx = i + } + + // Find the index at which to insert the new exemplar. + if nIdx == -1 && *e.Value <= *exemplar.Value { + nIdx = i + } + + // Find the two closest exemplars and pick the one with the older timestamp.
+ pLog = cLog + cLog = math.Log(exemplar.GetValue()) + if i == 0 { + continue + } + diff := math.Abs(cLog - pLog) + if md == -1 || diff < md { + // The closest exemplar pair is at index: i-1, i. + // Choose the exemplar with the older timestamp for replacement. + md = diff + if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { + rIdx = i + } else { + rIdx = i - 1 + } + } + + } + + // If all existing exemplars are smaller than the new exemplar, + // then it should be inserted at the end. + if nIdx == -1 { + nIdx = len(n.exemplars) + } + // Here, we have the following relationships: + // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0) + // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars)) + + if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + // If the oldest exemplar has expired, then replace it with the new exemplar. + rIdx = otIdx + } else { + // In the previous for loop, when calculating the closest pair of exemplars, + // we did not take into account the newly inserted exemplar. + // So we need to calculate with the newly inserted exemplar again. + elog := math.Log(e.GetValue()) + if nIdx > 0 { + diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) + if diff < md { + // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-n-----------x----------------x----x-----| + // nIdx-1--^ ^--new exemplar value + // Do not make the spread worse, replace nIdx-1 and not rIdx. + md = diff + rIdx = nIdx - 1 + } + } + if nIdx < len(n.exemplars) { + diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) + if diff < md { + // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-----------n-x----------------x----x-----| + // new exemplar value--^ ^--nIdx + // Do not make the spread worse, replace nIdx and not rIdx. + rIdx = nIdx + } + } + } + + // Adjust the slice according to rIdx and nIdx. + switch { + case rIdx == nIdx: + n.exemplars[nIdx] = e + case rIdx < nIdx: + n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) + case rIdx > nIdx: + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+ } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d644445..a4fa6eabd7887 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e57237d52..9d9b81ab44810 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18ed5e9..62a4e7ad9a06b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,15 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { @@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) { ch <- c.maxVsize ch <- c.rss ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } // Collect returns the current state of all metrics of the collector. 
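For illustration, a minimal sketch of a process collector exposing the two new descriptors above, assuming the public collectors wrapper; on Linux the values are filled from /proc netstat data, as the next hunk shows, and the listen address is illustrative:

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()
    // Now also exposes process_network_receive_bytes_total and
    // process_network_transmit_bytes_total.
    reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    _ = http.ListenAndServe(":8080", nil)
}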
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 8c1136ceea355..14d56d2d06808 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } else { c.reportError(ch, nil, err) } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9819917b83b35..315eab5f179bd 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } +// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, +// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. +func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + type ( closeNotifierDelegator struct{ *responseWriterDelegator } flusherDelegator struct{ *responseWriterDelegator } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 09b8d2fbead05..e598e66e688ea 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -38,12 +38,13 @@ import ( "io" "net/http" "strconv" - "strings" "sync" "time" + "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" ) @@ -54,6 +55,18 @@ const ( processStartTimeHeader = "Process-Start-Time-Unix" ) +// Compression represents the content encodings that handlers support for +// HTTP responses. +type Compression string + +const ( + Identity Compression = "identity" + Gzip Compression = "gzip" + Zstd Compression = "zstd" +) + +var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} + var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) @@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } } + // Select compression formats to offer based on default or user choice.
+ var compressions []string + if !opts.DisableCompression { + offers := defaultCompressionFormats + if len(opts.OfferedCompressions) > 0 { + offers = opts.OfferedCompressions + } + for _, comp := range offers { + compressions = append(compressions, string(comp)) + } + } + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if !opts.ProcessStartTime.IsZero() { rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) } @@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } else { contentType = expfmt.Negotiate(req.Header) } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) + rsp.Header().Set(contentTypeHeader, string(contentType)) - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) + w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions) + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error getting writer", err) + } + w = io.Writer(rsp) + encodingHeader = string(Identity) + } - gz.Reset(w) - defer gz.Close() + defer closeWriter() - w = gz + // Set Content-Encoding only when data is compressed + if encodingHeader != string(Identity) { + rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) // handleError handles the error according to opts.ErrorHandling @@ -343,9 +370,19 @@ type HandlerOpts struct { // no effect on the HTTP status code because ErrorHandling is set to // ContinueOnError. Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. + // DisableCompression disables the response encoding (compression) and + // encoding negotiation. If true, the handler will + // never compress the response, even if requested + // by the client and the OfferedCompressions field is set. DisableCompression bool + // OfferedCompressions is a set of encodings (compressions) the handler will + // try to offer when negotiating with the client. This defaults to identity, gzip + // and zstd. + // NOTE: If the handler can't agree with the client on an encoding, or if + // unsupported or empty encodings are set in OfferedCompressions, the + // handler always falls back to no compression (identity) for + // compatibility reasons. In such cases, ErrorLog will be used if set. + OfferedCompressions []Compression // The number of concurrent HTTP requests is limited to // MaxRequestsInFlight. Additional requests are responded to with 503 // Service Unavailable and a suitable message in the body. If @@ -381,19 +418,6 @@ type HandlerOpts struct { ProcessStartTime time.Time } -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - // httpError removes any content-encoding header and then calls http.Error with // the provided error and http.StatusInternalServerError. Error contents is // supposed to be uncompressed plain text.
Same as with a plain http.Error, this @@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) { http.StatusInternalServerError, ) } + +// negotiateEncodingWriter reads the Accept-Encoding header from a request and +// selects the right compression based on an allow-list of supported +// compressions. It returns a writer implementing the compression and the +// correct value that the caller can set in the response header. +func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { + if len(compressions) == 0 { + return rw, string(Identity), func() {}, nil + } + + // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented. + selected := httputil.NegotiateContentEncoding(r, compressions) + + switch selected { + case "zstd": + // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. + z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) + if err != nil { + return nil, "", func() {}, err + } + + z.Reset(rw) + return z, selected, func() { _ = z.Close() }, nil + case "gzip": + gz := gzipPool.Get().(*gzip.Writer) + gz.Reset(rw) + return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil + case "identity": + // This means the content is not compressed. + return rw, selected, func() {}, nil + default: + // The requested content encoding is not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 5e2ced25a02f7..c6fd2f58b7426 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash + continue + } + + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } + continue } + newDimHashesByName[desc.fqName] = desc.dimHash } // A Collector yielding no Desc at all is considered unchecked.
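For illustration, a minimal sketch of the negotiation above from the caller's side, using the OfferedCompressions field introduced in this hunk; the listen address is illustrative:

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()
    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
        // Offer only gzip and identity, dropping the zstd default. The
        // handler negotiates against Accept-Encoding and falls back to
        // identity when nothing matches.
        OfferedCompressions: []promhttp.Compression{promhttp.Identity, promhttp.Gzip},
    }))
    _ = http.ListenAndServe(":8080", nil)
}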
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1462704446c80..1ab0e47965504 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -783,3 +783,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go index f52ad9eab67b4..e1441598da84f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -30,4 +30,5 @@ var defaultValidations = []Validation{ validations.LintReservedChars, validations.LintCamelCase, validations.LintUnitAbbreviations, + validations.LintDuplicateMetric, } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go new file mode 100644 index 0000000000000..fdc1e62394823 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "fmt" + "reflect" + + dto "github.com/prometheus/client_model/go" +) + +// LintDuplicateMetric detects duplicate metrics.
+func LintDuplicateMetric(mf *dto.MetricFamily) []error { + var problems []error + + for i, m := range mf.Metric { + for _, k := range mf.Metric[i+1:] { + if reflect.DeepEqual(m.Label, k.Label) { + problems = append(problems, fmt.Errorf("metric not unique")) + break + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go index bc8dbd1e16b97..de52cfee443c5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -44,21 +44,21 @@ func LintMetricUnits(mf *dto.MetricFamily) []error { return problems } -// LintMetricTypeInName detects when metric types are included in the metric name. +// LintMetricTypeInName detects when the metric type is included in the metric name. func LintMetricTypeInName(mf *dto.MetricFamily) []error { + if mf.GetType() == dto.MetricType_UNTYPED { + return nil + } + var problems []error - n := strings.ToLower(mf.GetName()) - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue - } + n := strings.ToLower(mf.GetName()) + typename := strings.ToLower(mf.GetType().String()) - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) - } + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) } + return problems } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 9dce15eafa2e3..6f1200180a76f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -42,9 +42,8 @@ import ( "fmt" "io" "net/http" - "reflect" - "github.com/davecgh/go-spew/spew" + "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "google.golang.org/protobuf/proto" @@ -159,6 +158,9 @@ func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { // ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in // plain text format. Then it compares it with the results that the `expected` would return. // If the `metricNames` is not empty it would filter the comparison only to the given metric names. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and scraped metrics. See https://github.com/prometheus/client_golang/issues/1351. func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error { resp, err := http.Get(url) if err != nil { @@ -184,9 +186,11 @@ func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) err return compareMetricFamilies(scraped, wanted, metricNames...) } -// CollectAndCompare registers the provided Collector with a newly created -// pedantic Registry. 
It then calls GatherAndCompare with that Registry and with -// the provided metricNames. +// CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text +// exposition format to the data read from expected. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and collected metrics. See https://github.com/prometheus/client_golang/issues/1351. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { @@ -199,6 +203,9 @@ func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames . // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...) } @@ -207,6 +214,9 @@ func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ... // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error { got, done, err := g.Gather() defer done() @@ -222,6 +232,31 @@ func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected return compareMetricFamilies(got, wanted, metricNames...) } +// CollectAndFormat collects the metrics identified by `metricNames` and returns them in the given format. +func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNames ...string) ([]byte, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %w", err) + } + + gotFiltered, err := reg.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %w", err) + } + + gotFiltered = filterMetrics(gotFiltered, metricNames) + + var gotFormatted bytes.Buffer + enc := expfmt.NewEncoder(&gotFormatted, expfmt.NewFormat(format)) + for _, mf := range gotFiltered { + if err := enc.Encode(mf); err != nil { + return nil, fmt.Errorf("encoding gathered metrics failed: %w", err) + } + } + + return gotFormatted.Bytes(), nil +} + // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. 
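As a usage sketch for the LintDuplicateMetric check that validation.go now enables by default: it flags families carrying two samples with identical label sets. The sample exposition below is illustrative and assumes promlint's existing New/Lint API:

package main

import (
    "fmt"
    "strings"

    "github.com/prometheus/client_golang/prometheus/testutil/promlint"
)

func main() {
    const metrics = `# HELP http_requests_total Total requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1
http_requests_total{code="200"} 2
`
    problems, err := promlint.New(strings.NewReader(metrics)).Lint()
    if err != nil {
        panic(err)
    }
    for _, p := range problems {
        fmt.Println(p.Metric, p.Text) // reports "metric not unique"
    }
}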
func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { @@ -277,73 +312,12 @@ func compare(got, want []*dto.MetricFamily) error { return fmt.Errorf("encoding expected metrics failed: %w", err) } } - if diffErr := diff(wantBuf, gotBuf); diffErr != "" { + if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { return fmt.Errorf(diffErr) } return nil } -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. -func diff(expected, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ - A: internal.SplitLines(e), - B: internal.SplitLines(a), - FromFile: "metric output does not match expectation; want", - FromDate: "", - ToFile: "got:", - ToDate: "", - Context: 1, - }) - - if diff == "" { - return "" - } - - return "\n\nDiff:\n" + diff -} - -// typeAndKind returns the type and kind of the given interface{} -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { var filtered []*dto.MetricFamily for _, m := range metrics { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59f8398..2c808eece0a0c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. 
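And a short sketch of the new CollectAndFormat helper added above, which registers the collector on a fresh pedantic registry and returns the selected metrics in the requested exposition format; the counter name is illustrative:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/testutil"
    "github.com/prometheus/common/expfmt"
)

func main() {
    c := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "jobs_total", Help: "Jobs run.",
    })
    c.Inc()

    out, err := testutil.CollectAndFormat(c, expfmt.TypeTextPlain, "jobs_total")
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out)) // text-format exposition of jobs_total
}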
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa21643e4..1448439b7f722 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9d9204..cf0c150c2e184 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. 
All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd17813..d942af8edd46d 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). - fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format { // specified version number. 
func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, fmt.Errorf("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed. +func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) } // FormatType deduces an overall FormatType for the given format. diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93f92d8..11c8ff4b9dbc5 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a9ec2a..4b86434b33275 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
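For illustration, a minimal sketch combining NewFormat (the recommended replacement for the deprecated Fmt* constants) with the new WithEscapingScheme helper; the exact printed string is approximate:

package main

import (
    "fmt"

    "github.com/prometheus/common/expfmt"
    "github.com/prometheus/common/model"
)

func main() {
    f := expfmt.NewFormat(expfmt.TypeTextPlain)
    fmt.Println(f)
    // Prints something like:
    // text/plain; version=0.0.4; charset=utf-8; escaping=underscores
    fmt.Println(f.WithEscapingScheme(model.UnderscoreEscaping))
}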
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211af2a5..f085a923f6cdc 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. + currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. 
} return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 3317ce22ff73d..73b7aa3e60bdd 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. 
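A small sketch contrasting the scheme-dependent IsValid with the scheme-independent IsValidLegacy added in the next hunk, plus the string-typed IsValidLegacyMetricName; the label name is illustrative:

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    n := model.LabelName("http.requests")

    model.NameValidationScheme = model.UTF8Validation
    fmt.Println(n.IsValid())       // true: any non-empty valid UTF-8 passes
    fmt.Println(n.IsValidLegacy()) // false: '.' is not legal in legacy names

    // IsValidLegacyMetricName now takes a plain string.
    fmt.Println(model.IsValidLegacyMetricName("http.requests")) // false
}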
func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b46e5d4..abb2c9001831c 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685e71fc..0000000000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a59cf8..f50966bc494e4 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -34,10 +34,13 @@ var ( // goroutines are started. NameValidationScheme = LegacyValidation - // NameEscapingScheme defines the default way that names will be - // escaped when presented to systems that do not support UTF-8 names. 
If the - // Content-Type "escaping" term is specified, that will override this value. - NameEscapingScheme = ValueEncodingEscaping + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled. + NameEscapingScheme = UnderscoreEscaping ) // ValidationScheme is a Go enum for determining how metric and label names will @@ -161,7 +164,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +179,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +211,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. - if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +233,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +243,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,10 +259,10 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } @@ -283,7 +286,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -309,7 +312,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") @@ -452,6 +455,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, 
fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore index 9fa948ebf9d5f..ed4d259db23ff 100644 --- a/vendor/go.etcd.io/bbolt/.gitignore +++ b/vendor/go.etcd.io/bbolt/.gitignore @@ -6,5 +6,7 @@ cover.out cover-*.out /.idea *.iml +/bbolt /cmd/bbolt/bbolt +.DS_Store diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version index 013173af5e9bc..14bee92c9e72b 100644 --- a/vendor/go.etcd.io/bbolt/.go-version +++ b/vendor/go.etcd.io/bbolt/.go-version @@ -1 +1 @@ -1.22.6 +1.23.2 diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 21407797416eb..f5a6703a0bb32 100644 --- a/vendor/go.etcd.io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -1,6 +1,7 @@ BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" +GOFILES = $(shell find . -name \*.go) TESTFLAGS_RACE=-race=false ifdef ENABLE_RACE @@ -13,9 +14,26 @@ ifdef CPU endif TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS) +TESTFLAGS_TIMEOUT=30m +ifdef TIMEOUT + TESTFLAGS_TIMEOUT=$(TIMEOUT) +endif + +TESTFLAGS_ENABLE_STRICT_MODE=false +ifdef ENABLE_STRICT_MODE + TESTFLAGS_ENABLE_STRICT_MODE=$(ENABLE_STRICT_MODE) +endif + +.EXPORT_ALL_VARIABLES: +TEST_ENABLE_STRICT_MODE=${TESTFLAGS_ENABLE_STRICT_MODE} + .PHONY: fmt fmt: - !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + @echo "Verifying gofmt, failures can be fixed with ./scripts/fix.sh" + @!(gofmt -l -s -d ${GOFILES} | grep '[a-z]') + + @echo "Verifying goimports, failures can be fixed with ./scripts/fix.sh" + @!(go run golang.org/x/tools/cmd/goimports@latest -l -d ${GOFILES} | grep '[a-z]') .PHONY: lint lint: @@ -24,21 +42,23 @@ lint: .PHONY: test test: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt .PHONY: coverage coverage: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \ + TEST_FREELIST_TYPE=hashmap go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-hashmap.out -covermode atomic @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v -timeout 30m \ + TEST_FREELIST_TYPE=array go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-array.out -covermode atomic BOLT_CMD=bbolt @@ -55,7 +75,7 @@ gofail-enable: install-gofail gofail enable . .PHONY: gofail-disable -gofail-disable: +gofail-disable: install-gofail gofail disable . 
.PHONY: install-gofail @@ -65,12 +85,24 @@ install-gofail: .PHONY: test-failpoint test-failpoint: @echo "[failpoint] hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint @echo "[failpoint] array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint - -.PHONY: test-robustness # Running robustness tests requires root permission -test-robustness: - go test -v ${TESTFLAGS} ./tests/dmflakey -test.root - go test -v ${TESTFLAGS} ./tests/robustness -test.root + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + +.PHONY: test-robustness # Running robustness tests requires root permission for now +# TODO: Remove sudo once we fully migrate to the prow infrastructure +test-robustness: gofail-enable build + sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root + sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root + +.PHONY: test-benchmark-compare +# Runs benchmark tests on the current git ref and the given REF, and compares +# the two. +test-benchmark-compare: install-benchstat + @git fetch + ./scripts/compare_benchmarks.sh $(REF) + +.PHONY: install-benchstat +install-benchstat: + go install golang.org/x/perf/cmd/benchstat@latest diff --git a/vendor/go.etcd.io/bbolt/OWNERS b/vendor/go.etcd.io/bbolt/OWNERS new file mode 100644 index 0000000000000..91f168a798085 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - ahrtr # Benjamin Wang + - serathius # Marek Siarkowicz + - ptabor # Piotr Tabor + - spzala # Sahdev Zala +reviewers: + - fuweid # Wei Fu + - tjungblu # Thomas Jungblut diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md index 495a93ef8f38d..d27d3a783fd5b 100644 --- a/vendor/go.etcd.io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -1,10 +1,8 @@ bbolt ===== -[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) -[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) -[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Go Report Card](https://goreportcard.com/badge/go.etcd.io/bbolt?style=flat-square)](https://goreportcard.com/report/go.etcd.io/bbolt) +[![Go Reference](https://pkg.go.dev/badge/go.etcd.io/bbolt.svg)](https://pkg.go.dev/go.etcd.io/bbolt) [![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) [![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) @@ -71,6 +69,7 @@ New minor versions may add additional features to the API. 
- [LMDB](#lmdb) - [Caveats & Limitations](#caveats--limitations) - [Reading the Source](#reading-the-source) + - [Known Issues](#known-issues) - [Other Projects Using Bolt](#other-projects-using-bolt) ## Getting Started @@ -103,7 +102,7 @@ To use bbolt as an embedded key-value store, import as: ```go import bolt "go.etcd.io/bbolt" -db, err := bolt.Open(path, 0666, nil) +db, err := bolt.Open(path, 0600, nil) if err != nil { return err } @@ -298,6 +297,17 @@ db.Update(func(tx *bolt.Tx) error { }) ``` +You can retrieve an existing bucket using the `Tx.Bucket()` function: +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + if b == nil { + return errors.New("bucket does not exist") + } + return nil +}) +``` + You can also create a bucket only if it doesn't exist by using the `Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this function for all your top-level buckets after you open your database so you can @@ -305,6 +315,17 @@ guarantee that they exist for future transactions. To delete a bucket, simply call the `Tx.DeleteBucket()` function. +You can also iterate over all existing top-level buckets with `Tx.ForEach()`: + +```go +db.View(func(tx *bolt.Tx) error { + tx.ForEach(func(name []byte, b *bolt.Bucket) error { + fmt.Println(string(name)) + return nil + }) + return nil +}) +``` ### Using key/value pairs @@ -336,7 +357,17 @@ exists then it will return its byte slice value. If it doesn't exist then it will return `nil`. It's important to note that you can have a zero-length value set to a key which is different than the key not existing. -Use the `Bucket.Delete()` function to delete a key from the bucket. +Use the `Bucket.Delete()` function to delete a key from the bucket: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Delete([]byte("answer")) + return err +}) +``` + +This will delete the key `answer` from the bucket `MyBucket`. Please note that values returned from `Get()` are only valid while the transaction is open. If you need to use a value outside of the transaction @@ -654,7 +685,7 @@ uses a shared lock to allow multiple processes to read from the database but it will block any processes from opening the database in read-write mode. ```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +db, err := bolt.Open("my.db", 0600, &bolt.Options{ReadOnly: true}) if err != nil { log.Fatal(err) } @@ -890,7 +921,7 @@ The best places to start are the main entry points into Bolt: - `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the arguments, a cursor is used to traverse the B+tree to the page and position - where they key & value will be written. Once the position is found, the bucket + where the key & value will be written. Once the position is found, the bucket materializes the underlying page and the page's parent pages into memory as "nodes". These nodes are where mutations occur during read-write transactions. These changes get flushed to disk during commit. @@ -919,6 +950,21 @@ The best places to start are the main entry points into Bolt: If you have additional notes that could be helpful for others, please submit them via pull request. +## Known Issues + +- bbolt might run into a data corruption issue on Linux when the feature + [ext4: fast commit](https://lwn.net/Articles/842385/), which was introduced in + Linux kernel v5.10, is enabled.
The fixes to the issue were included in + Linux kernel v5.17; please refer to the links below: + + * [ext4: fast commit may miss tracking unwritten range during ftruncate](https://lore.kernel.org/linux-ext4/20211223032337.5198-3-yinxin.x@bytedance.com/) + * [ext4: fast commit may not fallback for ineligible commit](https://lore.kernel.org/lkml/202201091544.W5HHEXAp-lkp@intel.com/T/#ma0768815e4b5f671e9e451d578256ef9a76fe30e) + * [ext4 updates for 5.17](https://lore.kernel.org/lkml/YdyxjTFaLWif6BCM@mit.edu/) + + Please also refer to the discussion in https://github.com/etcd-io/bbolt/issues/562. + +- Writing a value with a length of 0 will always result in reading back an empty `[]byte{}` value. + Please refer to [issues/726#issuecomment-2061694802](https://github.com/etcd-io/bbolt/issues/726#issuecomment-2061694802). ## Other Projects Using Bolt @@ -934,13 +980,16 @@ Below is a list of public, open source projects that use Bolt: * [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. * [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB viewer that runs on Windows, Linux, and Android. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [bstore](https://github.com/mjl-/bstore) - Database library storing Go values, with referential/unique/nonzero constraints, indices, automatic schema management with struct tags, and a query API. * [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. +* [Buildkit](https://github.com/moby/buildkit) - concurrent, cache-efficient, and Dockerfile-agnostic builder toolkit * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. * [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Containerd](https://github.com/containerd/containerd) - An open and reliable container runtime * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. * [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. @@ -964,6 +1013,7 @@ Below is a list of public, open source projects that use Bolt: * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. +* [Portainer](https://github.com/portainer/portainer) - A lightweight service delivery platform for containerized applications that can be used to manage Docker, Swarm, Kubernetes and ACI environments. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. * [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award-winning, modular, DIY reef tank controller using easy-to-learn electronics based on a Raspberry Pi. diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_aix.go similarity index 100% rename from vendor/go.etcd.io/bbolt/bolt_unix_aix.go rename to vendor/go.etcd.io/bbolt/bolt_aix.go diff --git a/vendor/go.etcd.io/bbolt/bolt_android.go b/vendor/go.etcd.io/bbolt/bolt_android.go new file mode 100644 index 0000000000000..11890f0d70592 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_android.go @@ -0,0 +1,90 @@ +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain the lock (shared or exclusive, per lockType). + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = unix.Madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice.
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_solaris.go similarity index 100% rename from vendor/go.etcd.io/bbolt/bolt_unix_solaris.go rename to vendor/go.etcd.io/bbolt/bolt_solaris.go diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go index 757ae4d1a484f..aaa48d241d854 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -1,5 +1,5 @@ -//go:build !windows && !plan9 && !solaris && !aix -// +build !windows,!plan9,!solaris,!aix +//go:build !windows && !plan9 && !solaris && !aix && !android +// +build !windows,!plan9,!solaris,!aix,!android package bbolt @@ -10,6 +10,8 @@ import ( "unsafe" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/errors" ) // flock acquires an advisory lock on a file descriptor. @@ -36,7 +38,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go index e5dde27454f29..ec21ecb85c7d8 100644 --- a/vendor/go.etcd.io/bbolt/bolt_windows.go +++ b/vendor/go.etcd.io/bbolt/bolt_windows.go @@ -8,6 +8,8 @@ import ( "unsafe" "golang.org/x/sys/windows" + + "go.etcd.io/bbolt/errors" ) // fdatasync flushes written data to a file descriptor. @@ -42,7 +44,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. @@ -70,7 +72,7 @@ func mmap(db *DB, sz int) error { return fmt.Errorf("truncate: %s", err) } sizehi = uint32(sz >> 32) - sizelo = uint32(sz) & 0xffffffff + sizelo = uint32(sz) } // Open a file mapping handle. @@ -93,7 +95,7 @@ func mmap(db *DB, sz int) error { } // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr)) db.datasz = sz return nil diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go index f3533d3446b8d..6371ace972ce1 100644 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "unsafe" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) const ( @@ -14,8 +17,6 @@ const ( MaxValueSize = (1 << 31) - 2 ) -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - const ( minFillPercent = 0.1 maxFillPercent = 1.0 @@ -27,12 +28,12 @@ const DefaultFillPercent = 0.5 // Bucket represents a collection of key/value pairs inside the database. type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache + *common.InBucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *common.Page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[common.Pgid]*node // node cache // Sets the threshold for filling nodes when they split. By default,
By default, // the bucket will fill to 50% but it can be useful to increase this @@ -42,21 +43,12 @@ type Bucket struct { FillPercent float64 } -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - // newBucket returns a new bucket associated with a transaction. func newBucket(tx *Tx) Bucket { var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} if tx.writable { b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) + b.nodes = make(map[common.Pgid]*node) } return b } @@ -67,8 +59,8 @@ func (b *Bucket) Tx() *Tx { } // Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root +func (b *Bucket) Root() common.Pgid { + return b.RootPage() } // Writable returns whether the bucket is writable. @@ -105,7 +97,7 @@ func (b *Bucket) Bucket(name []byte) *Bucket { k, v, flags := c.seek(name) // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + if !bytes.Equal(name, k) || (flags&common.BucketLeafFlag) == 0 { return nil } @@ -125,8 +117,8 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // Unaligned access requires a copy to be made. const unalignedMask = unsafe.Alignof(struct { - bucket - page + common.InBucket + common.Page }{}) - 1 unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 if unaligned { @@ -136,15 +128,15 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = &common.InBucket{} + *child.InBucket = *(*common.InBucket)(unsafe.Pointer(&value[0])) } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = (*common.InBucket)(unsafe.Pointer(&value[0])) } // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + if child.RootPage() == 0 { + child.page = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) } return &child @@ -153,13 +145,23 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // CreateBucket creates a new bucket at the given key and returns the new bucket. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. 
-func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { +func (b *Bucket) CreateBucket(key []byte) (rb *Bucket, err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket %q successfully", key) + } + }() + } if b.tx.db == nil { - return nil, ErrTxClosed + return nil, errors.ErrTxClosed } else if !b.tx.writable { - return nil, ErrTxNotWritable + return nil, errors.ErrTxNotWritable } else if len(key) == 0 { - return nil, ErrBucketNameRequired + return nil, errors.ErrBucketNameRequired } // Insert into node. @@ -173,21 +175,21 @@ // Return an error if there is an existing key. if bytes.Equal(newKey, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists + if (flags & common.BucketLeafFlag) != 0 { + return nil, errors.ErrBucketExists } - return nil, ErrIncompatibleValue + return nil, errors.ErrIncompatibleValue } // Create empty, inline bucket. var bucket = Bucket{ - bucket: &bucket{}, + InBucket: &common.InBucket{}, rootNode: &node{isLeaf: true}, FillPercent: DefaultFillPercent, } var value = bucket.write() - c.node().put(newKey, newKey, value, 0, bucketLeafFlag) + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket @@ -200,39 +202,108 @@ // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // Returns an error if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err +func (b *Bucket) CreateBucketIfNotExists(key []byte) (rb *Bucket, err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket if not exist %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket if not exist %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket if not exist %q successfully", key) + } + }() + } + + if b.tx.db == nil { + return nil, errors.ErrTxClosed + } else if !b.tx.writable { + return nil, errors.ErrTxNotWritable + } else if len(key) == 0 { + return nil, errors.ErrBucketNameRequired + } + + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, which would prevent it from being allocated on the stack. + newKey := cloneBytes(key) + + if b.buckets != nil { + if child := b.buckets[string(newKey)]; child != nil { + return child, nil + } + } + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if there is an existing non-bucket key. + if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(newKey)] = child + } + + return child, nil + } + return nil, errors.ErrIncompatibleValue + } + + // Create empty, inline bucket.
+ var bucket = Bucket{ + InBucket: &common.InBucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, } - return child, nil + var value = bucket.write() + + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(newKey), nil } // DeleteBucket deletes a bucket at the given key. // Returns an error if the bucket does not exist, or if the key represents a non-bucket value. -func (b *Bucket) DeleteBucket(key []byte) error { +func (b *Bucket) DeleteBucket(key []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting bucket %q failed: %v", key, err) + } else { + lg.Debugf("Deleting bucket %q successfully", key) + } + }() + } + if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + return errors.ErrIncompatibleValue } // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEachBucket(func(k []byte) error { + child := b.Bucket(newKey) + err = child.ForEachBucket(func(k []byte) error { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) } @@ -243,7 +314,7 @@ func (b *Bucket) DeleteBucket(key []byte) error { } // Remove cached copy. - delete(b.buckets, string(key)) + delete(b.buckets, string(newKey)) // Release all bucket pages to freelist. child.nodes = nil @@ -251,19 +322,119 @@ func (b *Bucket) DeleteBucket(key []byte) error { child.free() // Delete the node if we have a matching key. - c.node().del(key) + c.node().del(newKey) return nil } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. or the key represents a non-bucket value; +// 4. the source and destination buckets are the same. +func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { + lg := b.tx.db.Logger() + if lg != discardLogger { + lg.Debugf("Moving bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Moving bucket %q failed: %v", key, err) + } else { + lg.Debugf("Moving bucket %q successfully", key) + } + }() + } + + if b.tx.db == nil || dstBucket.tx.db == nil { + return errors.ErrTxClosed + } else if !b.Writable() || !dstBucket.Writable() { + return errors.ErrTxNotWritable + } + + if b.tx.db.Path() != dstBucket.tx.db.Path() || b.tx != dstBucket.tx { + lg.Errorf("The source and target buckets are not in the same db file, source bucket in %s and target bucket in %s", b.tx.db.Path(), dstBucket.tx.db.Path()) + return errors.ErrDifferentDB + } + + newKey := cloneBytes(key) + + // Move cursor to correct position. 
+ c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + lg.Errorf("An incompatible key %s exists in the source bucket", newKey) + return errors.ErrIncompatibleValue + } + + // Return an error if the source bucket and the + // destination bucket are actually the same bucket. + if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { + lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b, dstBucket) + return errors.ErrSameBuckets + } + + // check whether the key already exists in the destination bucket + curDst := dstBucket.Cursor() + k, _, flags = curDst.seek(newKey) + + // Return an error if there is an existing key in the destination bucket. + if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrBucketExists + } + lg.Errorf("An incompatible key %s exists in the target bucket", newKey) + return errors.ErrIncompatibleValue + } + + // remove the sub-bucket from the source bucket + delete(b.buckets, string(newKey)) + c.node().del(newKey) + + // add the sub-bucket to the destination bucket + newValue := cloneBytes(v) + curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag) + + return nil +} + +// Inspect returns the structure of the bucket. +func (b *Bucket) Inspect() BucketStructure { + return b.recursivelyInspect([]byte("root")) +} + +func (b *Bucket) recursivelyInspect(name []byte) BucketStructure { + bs := BucketStructure{Name: string(name)} + + keyN := 0 + c := b.Cursor() + for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { + if flags&common.BucketLeafFlag != 0 { + childBucket := b.Bucket(k) + childBS := childBucket.recursivelyInspect(k) + bs.Children = append(bs.Children, childBS) + } else { + keyN++ + } + } + bs.KeyN = keyN + + return bs +} + // Get retrieves the value for a key in the bucket. // Returns a nil value if the key does not exist or if the key is a nested bucket. // The returned value is only valid for the life of the transaction. +// The returned memory is owned by bbolt and must never be modified; writing to this memory might corrupt the database. func (b *Bucket) Get(key []byte) []byte { k, v, flags := b.Cursor().seek(key) // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { + if (flags & common.BucketLeafFlag) != 0 { return nil } @@ -278,17 +449,27 @@ // If the key exists then its previous value will be overwritten. // Supplied value must remain valid for the life of the transaction. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
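`MoveBucket` is new API in this bump: it moves a sub-bucket between two buckets of the same writable transaction. A minimal usage sketch under the constraints listed in its doc comment; the bucket names are hypothetical:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("move.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		src, err := tx.CreateBucketIfNotExists([]byte("src"))
		if err != nil {
			return err
		}
		dst, err := tx.CreateBucketIfNotExists([]byte("dst"))
		if err != nil {
			return err
		}
		if _, err := src.CreateBucketIfNotExists([]byte("child")); err != nil {
			return err
		}
		// Move the sub-bucket "child" from src to dst; it must exist in
		// src and must not already exist in dst.
		return src.MoveBucket([]byte("child"), dst)
	})
	if err != nil {
		log.Fatal(err)
	}
}
```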
-func (b *Bucket) Put(key []byte, value []byte) error { +func (b *Bucket) Put(key []byte, value []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Putting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Putting key %q failed: %v", key, err) + } else { + lg.Debugf("Putting key %q successfully", key) + } + }() + } if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } else if len(key) == 0 { - return ErrKeyRequired + return errors.ErrKeyRequired } else if len(key) > MaxKeySize { - return ErrKeyTooLarge + return errors.ErrKeyTooLarge } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge + return errors.ErrValueTooLarge } // Insert into node. @@ -301,8 +482,8 @@ func (b *Bucket) Put(key []byte, value []byte) error { k, _, flags := c.seek(newKey) // Return an error if there is an existing key with a bucket value. - if bytes.Equal(newKey, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if bytes.Equal(newKey, k) && (flags&common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } // gofail: var beforeBucketPut struct{} @@ -315,11 +496,22 @@ func (b *Bucket) Put(key []byte, value []byte) error { // Delete removes a key from the bucket. // If the key does not exist then nothing is done and a nil error is returned. // Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { +func (b *Bucket) Delete(key []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting key %q failed: %v", key, err) + } else { + lg.Debugf("Deleting key %q successfully", key) + } + }() + } + if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } // Move cursor to correct position. @@ -332,8 +524,8 @@ func (b *Bucket) Delete(key []byte) error { } // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } // Delete the node if we have a matching key. @@ -343,44 +535,46 @@ func (b *Bucket) Delete(key []byte) error { } // Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } +func (b *Bucket) Sequence() uint64 { + return b.InSequence() +} // SetSequence updates the sequence number for the bucket. func (b *Bucket) SetSequence(v uint64) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Set the sequence. - b.bucket.sequence = v + b.SetInSequence(v) return nil } // NextSequence returns an autoincrementing integer for the bucket. 
func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { - return 0, ErrTxClosed + return 0, errors.ErrTxClosed } else if !b.Writable() { - return 0, ErrTxNotWritable + return 0, errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil + b.IncSequence() + return b.Sequence(), nil } // ForEach executes a function for each key/value pair in a bucket. @@ -390,7 +584,7 @@ func (b *Bucket) NextSequence() (uint64, error) { // the bucket; this will result in undefined behavior. func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { @@ -403,11 +597,11 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error { func (b *Bucket) ForEachBucket(fn func(k []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { - if flags&bucketLeafFlag != 0 { + if flags&common.BucketLeafFlag != 0 { if err := fn(k); err != nil { return err } @@ -421,64 +615,64 @@ func (b *Bucket) Stats() BucketStats { var s, subStats BucketStats pageSize := b.tx.db.pageSize s.BucketN += 1 - if b.root == 0 { + if b.RootPage() == 0 { s.InlineBucketN += 1 } - b.forEachPage(func(p *page, depth int, pgstack []pgid) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) + b.forEachPage(func(p *common.Page, depth int, pgstack []common.Pgid) { + if p.IsLeafPage() { + s.KeyN += int(p.Count()) // used totals the used bytes for the page - used := pageHeaderSize + used := common.PageHeaderSize - if p.count != 0 { + if p.Count() != 0 { // If page has any elements, add all element headers. - used += leafPageElementSize * uintptr(p.count-1) + used += common.LeafPageElementSize * uintptr(p.Count()-1) // Add all element key, value sizes. // The computation takes advantage of the fact that the position // of the last element's key/value equals to the total of the sizes // of all previous elements' keys and values. // It also includes the last element's header. - lastElement := p.leafPageElement(p.count - 1) - used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) + lastElement := p.LeafPageElement(p.Count() - 1) + used += uintptr(lastElement.Pos() + lastElement.Ksize() + lastElement.Vsize()) } - if b.root == 0 { + if b.RootPage() == 0 { // For inlined bucket just update the inline stats s.InlineBucketInuse += int(used) } else { // For non-inlined bucket update all the leaf stats s.LeafPageN++ s.LeafInuse += int(used) - s.LeafOverflowN += int(p.overflow) + s.LeafOverflowN += int(p.Overflow()) // Collect stats from sub-buckets. // Do that by iterating over all element headers // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { + for i := uint16(0); i < p.Count(); i++ { + e := p.LeafPageElement(i) + if (e.Flags() & common.BucketLeafFlag) != 0 { // For any bucket element, open the element value // and recursively call Stats on the contained bucket. 
- subStats.Add(b.openBucket(e.value()).Stats()) + subStats.Add(b.openBucket(e.Value()).Stats()) } } } - } else if (p.flags & branchPageFlag) != 0 { + } else if p.IsBranchPage() { s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) + lastElement := p.BranchPageElement(p.Count() - 1) // used totals the used bytes for the page // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) + used := common.PageHeaderSize + (common.BranchPageElementSize * uintptr(p.Count()-1)) // Add size of all keys and values. // Again, use the fact that last element's position equals to // the total of key, value sizes of all previous elements. - used += uintptr(lastElement.pos + lastElement.ksize) + used += uintptr(lastElement.Pos() + lastElement.Ksize()) s.BranchInuse += int(used) - s.BranchOverflowN += int(p.overflow) + s.BranchOverflowN += int(p.Overflow()) } // Keep track of maximum page depth. @@ -499,29 +693,29 @@ func (b *Bucket) Stats() BucketStats { } // forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) { +func (b *Bucket) forEachPage(fn func(*common.Page, int, []common.Pgid)) { // If we have an inline page then just use that. if b.page != nil { - fn(b.page, 0, []pgid{b.root}) + fn(b.page, 0, []common.Pgid{b.RootPage()}) return } // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, fn) + b.tx.forEachPage(b.RootPage(), fn) } // forEachPageNode iterates over every page (or node) in a bucket. // This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { +func (b *Bucket) forEachPageNode(fn func(*common.Page, *node, int)) { // If we have an inline page or root node then just use that. if b.page != nil { fn(b.page, nil, 0) return } - b._forEachPageNode(b.root, 0, fn) + b._forEachPageNode(b.RootPage(), 0, fn) } -func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) { +func (b *Bucket) _forEachPageNode(pgId common.Pgid, depth int, fn func(*common.Page, *node, int)) { var p, n = b.pageNode(pgId) // Execute function. @@ -529,16 +723,16 @@ func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, in // Recursively loop over children. if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) + if p.IsBranchPage() { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + b._forEachPageNode(elem.Pgid(), depth+1, fn) } } } else { if !n.isLeaf { for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) + b._forEachPageNode(inode.Pgid(), depth+1, fn) } } } @@ -561,9 +755,9 @@ func (b *Bucket) spill() error { } // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket + value = make([]byte, unsafe.Sizeof(common.InBucket{})) + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *child.InBucket } // Skip writing the bucket if there are no materialized nodes. 
@@ -577,10 +771,10 @@ func (b *Bucket) spill() error { if !bytes.Equal([]byte(name), k) { panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) } - if flags&bucketLeafFlag == 0 { + if flags&common.BucketLeafFlag == 0 { panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + c.node().put([]byte(name), []byte(name), value, 0, common.BucketLeafFlag) } // Ignore if there's not a materialized root node. @@ -595,16 +789,16 @@ func (b *Bucket) spill() error { b.rootNode = b.rootNode.root() // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + if b.rootNode.pgid >= b.tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.Pgid())) } - b.root = b.rootNode.pgid + b.SetRootPage(b.rootNode.pgid) return nil } // inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. +// and if it contains no subbuckets. Otherwise, returns false. func (b *Bucket) inlineable() bool { var n = b.rootNode @@ -615,11 +809,11 @@ func (b *Bucket) inlineable() bool { // Bucket is not inlineable if it contains subbuckets or if it goes beyond // our threshold for inline bucket size. - var size = pageHeaderSize + var size = common.PageHeaderSize for _, inode := range n.inodes { - size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) + size += common.LeafPageElementSize + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) - if inode.flags&bucketLeafFlag != 0 { + if inode.Flags()&common.BucketLeafFlag != 0 { return false } else if size > b.maxInlineBucketSize() { return false @@ -638,14 +832,14 @@ func (b *Bucket) maxInlineBucketSize() uintptr { func (b *Bucket) write() []byte { // Allocate the appropriate size. var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) + var value = make([]byte, common.BucketHeaderSize+n.size()) // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *b.InBucket // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + var p = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) n.write(p) return value @@ -662,8 +856,8 @@ func (b *Bucket) rebalance() { } // node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgId pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") +func (b *Bucket) node(pgId common.Pgid, parent *node) *node { + common.Assert(b.nodes != nil, "nodes map expected") // Retrieve node if it's already been created. if n := b.nodes[pgId]; n != nil { @@ -682,6 +876,12 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { var p = b.page if p == nil { p = b.tx.page(pgId) + } else { + // if p isn't nil, then it's an inline bucket. + // The pgId must be 0 in this case. + common.Verify(func() { + common.Assert(pgId == 0, "The page ID (%d) isn't 0 for an inline bucket", pgId) + }) } // Read the page into the node and cache it. @@ -696,19 +896,19 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { // free recursively frees all pages in the bucket. 
func (b *Bucket) free() { - if b.root == 0 { + if b.RootPage() == 0 { return } var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { + b.forEachPageNode(func(p *common.Page, n *node, _ int) { if p != nil { - tx.db.freelist.free(tx.meta.txid, p) + tx.db.freelist.Free(tx.meta.Txid(), p) } else { n.free() } }) - b.root = 0 + b.SetRootPage(0) } // dereference removes all references to the old mmap. @@ -723,11 +923,11 @@ func (b *Bucket) dereference() { } // pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { +// Otherwise, returns the underlying page. +func (b *Bucket) pageNode(id common.Pgid) (*common.Page, *node) { // Inline buckets have a fake page embedded in their value so treat them // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { + if b.RootPage() == 0 { if id != 0 { panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) } @@ -797,3 +997,9 @@ func cloneBytes(v []byte) []byte { copy(clone, v) return clone } + +type BucketStructure struct { + Name string `json:"name"` // name of the bucket + KeyN int `json:"keyN"` // number of key/value pairs + Children []BucketStructure `json:"buckets,omitempty"` // child buckets +} diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go index bbfd92a9bc1ee..0c1e28c106ff2 100644 --- a/vendor/go.etcd.io/bbolt/cursor.go +++ b/vendor/go.etcd.io/bbolt/cursor.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "sort" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // Cursor represents an iterator that can traverse over all key/value pairs in a bucket @@ -30,9 +33,9 @@ func (c *Cursor) Bucket() *Bucket { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.first() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -40,7 +43,7 @@ func (c *Cursor) First() (key []byte, value []byte) { func (c *Cursor) first() (key []byte, value []byte, flags uint32) { c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.goToFirstElementOnTheStack() @@ -51,7 +54,7 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil, flags } return k, v, flags @@ -61,9 +64,9 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. 
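The new `Inspect` method returns the `BucketStructure` defined above, which is JSON-tagged so a bucket hierarchy can be dumped directly. A minimal sketch, assuming the vendored bbolt in this diff; the bucket name is hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("inspect.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("MyBucket"))
		if b == nil {
			return nil // nothing to inspect
		}
		s := b.Inspect() // name, key count, and child buckets, recursively
		out, err := json.Marshal(s)
		if err != nil {
			return err
		}
		fmt.Println(string(out))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```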
func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) ref := elemRef{page: p, node: n} ref.index = ref.count() - 1 c.stack = append(c.stack, ref) @@ -80,7 +83,7 @@ func (c *Cursor) Last() (key []byte, value []byte) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -90,9 +93,9 @@ func (c *Cursor) Last() (key []byte, value []byte) { // If the cursor is at the end of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -102,9 +105,9 @@ func (c *Cursor) Next() (key []byte, value []byte) { // If the cursor is at the beginning of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.prev() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -115,7 +118,7 @@ func (c *Cursor) Prev() (key []byte, value []byte) { // follow, a nil key is returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.seek(seek) @@ -126,7 +129,7 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { if k == nil { return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { + } else if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -136,15 +139,15 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { // Delete fails if current key/value is a bucket or if the transaction is not writable. func (c *Cursor) Delete() error { if c.bucket.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !c.bucket.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } key, _, flags := c.keyValue() // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } c.node().del(key) @@ -156,7 +159,7 @@ func (c *Cursor) Delete() error { func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { // Start from root page/node and traverse to correct page. c.stack = c.stack[:0] - c.search(seek, c.bucket.root) + c.search(seek, c.bucket.RootPage()) // If this is a bucket then return a nil value. return c.keyValue() @@ -172,11 +175,11 @@ func (c *Cursor) goToFirstElementOnTheStack() { } // Keep adding pages pointing to the first element to the stack. 
- var pgId pgid + var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) @@ -193,11 +196,11 @@ func (c *Cursor) last() { } // Keep adding pages pointing to the last element in the stack. - var pgId pgid + var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) @@ -277,10 +280,10 @@ func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { } // search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgId pgid) { +func (c *Cursor) search(key []byte, pgId common.Pgid) { p, n := c.bucket.pageNode(pgId) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + if p != nil && !p.IsBranchPage() && !p.IsLeafPage() { + panic(fmt.Sprintf("invalid page type: %d: %x", p.Id(), p.Flags())) } e := elemRef{page: p, node: n} c.stack = append(c.stack, e) @@ -303,7 +306,7 @@ func (c *Cursor) searchNode(key []byte, n *node) { index := sort.Search(len(n.inodes), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) + ret := bytes.Compare(n.inodes[i].Key(), key) if ret == 0 { exact = true } @@ -315,18 +318,18 @@ func (c *Cursor) searchNode(key []byte, n *node) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) + c.search(key, n.inodes[index].Pgid()) } -func (c *Cursor) searchPage(key []byte, p *page) { +func (c *Cursor) searchPage(key []byte, p *common.Page) { // Binary search for the correct range. - inodes := p.branchPageElements() + inodes := p.BranchPageElements() var exact bool - index := sort.Search(int(p.count), func(i int) bool { + index := sort.Search(int(p.Count()), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) + ret := bytes.Compare(inodes[i].Key(), key) if ret == 0 { exact = true } @@ -338,7 +341,7 @@ func (c *Cursor) searchPage(key []byte, p *page) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, inodes[index].pgid) + c.search(key, inodes[index].Pgid()) } // nsearch searches the leaf node on the top of the stack for a key. @@ -349,16 +352,16 @@ func (c *Cursor) nsearch(key []byte) { // If we have a node then search its inodes. if n != nil { index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 + return bytes.Compare(n.inodes[i].Key(), key) != -1 }) e.index = index return } // If we have a page then search its leaf elements. 
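The search/searchNode/searchPage changes above preserve the existing seek pattern: sort.Search finds the first separator key >= the target, and on a non-exact match the branch descent steps back one slot so it follows the child whose separator is <= the target. A standalone sketch of that pattern (illustrative keys only):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

func main() {
	keys := [][]byte{[]byte("b"), []byte("d"), []byte("f")}
	target := []byte("e")

	exact := false
	// First index whose key compares >= target.
	index := sort.Search(len(keys), func(i int) bool {
		ret := bytes.Compare(keys[i], target)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	// Non-exact match in a branch page: descend via the last key <= target.
	if !exact && index > 0 {
		index--
	}
	fmt.Println(index) // 1, i.e. the child under separator "d"
}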
- inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 + inodes := p.LeafPageElements() + index := sort.Search(int(p.Count()), func(i int) bool { + return bytes.Compare(inodes[i].Key(), key) != -1 }) e.index = index } @@ -375,17 +378,17 @@ func (c *Cursor) keyValue() ([]byte, []byte, uint32) { // Retrieve value from node. if ref.node != nil { inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags + return inode.Key(), inode.Value(), inode.Flags() } // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags + elem := ref.page.LeafPageElement(uint16(ref.index)) + return elem.Key(), elem.Value(), elem.Flags() } // node returns the node that the cursor is currently positioned on. func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + common.Assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") // If the top of the stack is a leaf node then just return it. if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { @@ -395,19 +398,19 @@ func (c *Cursor) node() *node { // Start from root and traverse down the hierarchy. var n = c.stack[0].node if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) + n = c.bucket.node(c.stack[0].page.Id(), nil) } for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") + common.Assert(!n.isLeaf, "expected branch node") n = n.childAt(ref.index) } - _assert(n.isLeaf, "expected leaf node") + common.Assert(n.isLeaf, "expected leaf node") return n } // elemRef represents a reference to an element on a given page/node. type elemRef struct { - page *page + page *common.Page node *node index int } @@ -417,7 +420,7 @@ func (r *elemRef) isLeaf() bool { if r.node != nil { return r.node.isLeaf } - return (r.page.flags & leafPageFlag) != 0 + return r.page.IsLeafPage() } // count returns the number of inodes or page elements. @@ -425,5 +428,5 @@ func (r *elemRef) count() int { if r.node != nil { return len(r.node.inodes) } - return int(r.page.count) + return int(r.page.Count()) } diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 822798e41a5bf..5c1947e998413 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -3,49 +3,28 @@ package bbolt import ( "errors" "fmt" - "hash/fnv" "io" "os" "runtime" - "sort" "sync" "time" "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -const pgidNoFreelist pgid = 0xffffffffffffffff - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 + berrors "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" + fl "go.etcd.io/bbolt/internal/freelist" ) -// default page size for db is set to the OS page size. 
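Functionally the Cursor API is unchanged by the move to internal/common; callers still iterate with First/Next inside a transaction. Standard bbolt usage, shown as a sketch (the db path and bucket name are placeholders):

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets")) // assumes this bucket exists
		if b == nil {
			return nil
		}
		c := b.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s=%s\n", k, v)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}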
-var defaultPageSize = os.Getpagesize() - // The time elapsed between consecutive file locking attempts. const flockRetryTimeout = 50 * time.Millisecond // FreelistType is the type of the freelist backend type FreelistType string +// TODO(ahrtr): eventually we should (step by step) +// 1. default to `FreelistMapType`; +// 2. remove the `FreelistArrayType`, do not export `FreelistMapType` +// and remove field `FreelistType' from both `DB` and `Options`; const ( // FreelistArrayType indicates backend freelist type is array FreelistArrayType = FreelistType("array") @@ -137,6 +116,8 @@ type DB struct { // Supported only on Unix via mlock/munlock syscalls. Mlock bool + logger Logger + path string openFile func(string, int, os.FileMode) (*os.File, error) file *os.File @@ -146,15 +127,14 @@ type DB struct { dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta + meta0 *common.Meta + meta1 *common.Meta pageSize int opened bool rwtx *Tx txs []*Tx - freelist *freelist + freelist fl.Interface freelistLoad sync.Once pagePool sync.Pool @@ -191,13 +171,15 @@ func (db *DB) String() string { return fmt.Sprintf("DB<%q>", db.path) } -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. +// Open creates and opens a database at the given path with a given file mode. +// If the file does not exist then it will be created automatically with a given file mode. // Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - db := &DB{ +// Note: For read/write transactions, ensure the owner has write permission on the created/opened database file, e.g. 0600 +func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) { + db = &DB{ opened: true, } + // Set default options if no options are provided. if options == nil { options = DefaultOptions @@ -211,9 +193,27 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.Mlock = options.Mlock // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize + db.MaxBatchSize = common.DefaultMaxBatchSize + db.MaxBatchDelay = common.DefaultMaxBatchDelay + db.AllocSize = common.DefaultAllocSize + + if options.Logger == nil { + db.logger = getDiscardLogger() + } else { + db.logger = options.Logger + } + + lg := db.Logger() + if lg != discardLogger { + lg.Infof("Opening db file (%s) with mode %s and with options: %s", path, mode, options) + defer func() { + if err != nil { + lg.Errorf("Opening bbolt db (%s) failed: %v", path, err) + } else { + lg.Infof("Opening bbolt db (%s) successfully", path) + } + }() + } flag := os.O_RDWR if options.ReadOnly { @@ -222,6 +222,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // always load free pages in write mode db.PreLoadFreelist = true + flag |= os.O_CREATE } db.openFile = options.OpenFile @@ -230,9 +231,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Open data file and separate sync handler for metadata writes. 
- var err error - if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { + if db.file, err = db.openFile(path, flag, mode); err != nil { _ = db.close() + lg.Errorf("failed to open db file (%s): %v", path, err) return nil, err } db.path = db.file.Name() @@ -244,8 +245,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, !db.readOnly, options.Timeout); err != nil { + if err = flock(db, !db.readOnly, options.Timeout); err != nil { _ = db.close() + lg.Errorf("failed to lock db file (%s), readonly: %t, error: %v", path, db.readOnly, err) return nil, err } @@ -254,27 +256,28 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if db.pageSize = options.PageSize; db.pageSize == 0 { // Set the default page size to the OS page size. - db.pageSize = defaultPageSize + db.pageSize = common.DefaultPageSize } // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { + if info, statErr := db.file.Stat(); statErr != nil { _ = db.close() - return nil, err + lg.Errorf("failed to get db file's stats (%s): %v", path, err) + return nil, statErr } else if info.Size() == 0 { // Initialize new files with meta pages. - if err := db.init(); err != nil { + if err = db.init(); err != nil { // clean up file descriptor on initialization fail _ = db.close() + lg.Errorf("failed to initialize db file (%s): %v", path, err) return nil, err } } else { // try to get the page size from the metadata pages - if pgSize, err := db.getPageSize(); err == nil { - db.pageSize = pgSize - } else { + if db.pageSize, err = db.getPageSize(); err != nil { _ = db.close() - return nil, ErrInvalid + lg.Errorf("failed to get page size from db file (%s): %v", path, err) + return nil, err } } @@ -286,8 +289,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { + if err = db.mmap(options.InitialMmapSize); err != nil { _ = db.close() + lg.Errorf("failed to map db file (%s): %v", path, err) return nil, err } @@ -302,13 +306,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Flush freelist when transitioning from no sync to sync so // NoFreelistSync unaware boltdb can open the db later. 
if !db.NoFreelistSync && !db.hasSyncedFreelist() { - tx, err := db.Begin(true) + tx, txErr := db.Begin(true) if tx != nil { - err = tx.Commit() + txErr = tx.Commit() } - if err != nil { + if txErr != nil { + lg.Errorf("starting readwrite transaction failed: %v", txErr) _ = db.close() - return nil, err + return nil, txErr } } @@ -352,7 +357,7 @@ func (db *DB) getPageSize() (int, error) { return db.pageSize, nil } - return 0, ErrInvalid + return 0, berrors.ErrInvalid } // getPageSizeFromFirstMeta reads the pageSize from the first meta page @@ -361,11 +366,11 @@ func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) { var metaCanRead bool if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // getPageSizeFromSecondMeta reads the pageSize from the second meta page @@ -397,13 +402,13 @@ func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) { bw, err := db.file.ReadAt(buf[:], pos) if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // loadFreelist reads the freelist if it is synced, or reconstructs it @@ -414,17 +419,29 @@ func (db *DB) loadFreelist() { db.freelist = newFreelist(db.FreelistType) if !db.hasSyncedFreelist() { // Reconstruct free list by scanning the DB. - db.freelist.readIDs(db.freepages()) + db.freelist.Init(db.freepages()) } else { // Read free list from freelist page. - db.freelist.read(db.page(db.meta().freelist)) + db.freelist.Read(db.page(db.meta().Freelist())) } - db.stats.FreePageN = db.freelist.free_count() + db.stats.FreePageN = db.freelist.FreeCount() }) } func (db *DB) hasSyncedFreelist() bool { - return db.meta().freelist != pgidNoFreelist + return db.meta().Freelist() != common.PgidNoFreelist +} + +func (db *DB) fileSize() (int, error) { + info, err := db.file.Stat() + if err != nil { + return 0, fmt.Errorf("file stat error: %w", err) + } + sz := int(info.Size()) + if sz < db.pageSize*2 { + return 0, fmt.Errorf("file size too small %d", sz) + } + return sz, nil } // mmap opens the underlying memory-mapped file and initializes the meta references. @@ -433,21 +450,22 @@ func (db *DB) mmap(minsz int) (err error) { db.mmaplock.Lock() defer db.mmaplock.Unlock() - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } + lg := db.Logger() // Ensure the size is at least the minimum size. 
- fileSize := int(info.Size()) + var fileSize int + fileSize, err = db.fileSize() + if err != nil { + lg.Errorf("getting file size failed: %w", err) + return err + } var size = fileSize if size < minsz { size = minsz } size, err = db.mmapSize(size) if err != nil { + lg.Errorf("getting map size failed: %w", err) return err } @@ -472,6 +490,7 @@ func (db *DB) mmap(minsz int) (err error) { // gofail: var mapError string // return errors.New(mapError) if err = mmap(db, size); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] mmap failed, size: %d, error: %v", runtime.GOOS, runtime.GOARCH, size, err) return err } @@ -493,15 +512,16 @@ func (db *DB) mmap(minsz int) (err error) { } // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() + db.meta0 = db.page(0).Meta() + db.meta1 = db.page(1).Meta() // Validate the meta pages. We only return an error if both meta pages fail // validation, since meta0 failing validation means that it wasn't saved // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() + err0 := db.meta0.Validate() + err1 := db.meta1.Validate() if err0 != nil && err1 != nil { + lg.Errorf("both meta pages are invalid, meta0: %v, meta1: %v", err0, err1) return err0 } @@ -524,6 +544,7 @@ func (db *DB) munmap() error { // gofail: var unmapError string // return errors.New(unmapError) if err := munmap(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munmap failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("unmap error: %v", err.Error()) } @@ -543,13 +564,13 @@ func (db *DB) mmapSize(size int) (int, error) { // Verify the requested size is not above the maximum allowed. if size > maxMapSize { - return 0, fmt.Errorf("mmap too large") + return 0, errors.New("mmap too large") } // If larger than 1GB then grow by 1GB at a time. sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder + if remainder := sz % int64(common.MaxMmapStep); remainder > 0 { + sz += int64(common.MaxMmapStep) - remainder } // Ensure that the mmap size is a multiple of the page size. @@ -571,6 +592,7 @@ func (db *DB) munlock(fileSize int) error { // gofail: var munlockError string // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("munlock error: %v", err.Error()) } return nil @@ -580,6 +602,7 @@ func (db *DB) mlock(fileSize int) error { // gofail: var mlockError string // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] mlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("mlock error: %v", err.Error()) } return nil @@ -600,42 +623,43 @@ func (db *DB) init() error { // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf, pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag + p := db.pageInBuffer(buf, common.Pgid(i)) + p.SetId(common.Pgid(i)) + p.SetFlags(common.MetaPageFlag) // Initialize the meta page. 
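The mmap sizing rule touched above is unchanged by this diff: double the map from 32KB up to 1GB, then grow in 1GB steps (common.MaxMmapStep), rounding up to a page-size multiple and capping at maxMapSize. Restated as a standalone sketch:

package main

import (
	"errors"
	"fmt"
)

func mmapSize(size, pageSize, maxMapSize int) (int, error) {
	const maxMmapStep = 1 << 30 // 1GB, mirrors common.MaxMmapStep

	// Double the size from 32KB until 1GB.
	for i := uint(15); i <= 30; i++ {
		if size <= 1<<i {
			return 1 << i, nil
		}
	}
	if size > maxMapSize {
		return 0, errors.New("mmap too large")
	}

	// If larger than 1GB then grow by 1GB at a time.
	sz := int64(size)
	if remainder := sz % maxMmapStep; remainder > 0 {
		sz += maxMmapStep - remainder
	}

	// Ensure the mmap size is a multiple of the page size.
	if sz%int64(pageSize) != 0 {
		sz = ((sz / int64(pageSize)) + 1) * int64(pageSize)
	}

	// Do not exceed the maximum map size.
	if sz > int64(maxMapSize) {
		sz = int64(maxMapSize)
	}
	return int(sz), nil
}

func main() {
	fmt.Println(mmapSize(3<<30+1, 4096, 1<<34)) // rounds up to 4GB
}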
- m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() + m := p.Meta() + m.SetMagic(common.Magic) + m.SetVersion(common.Version) + m.SetPageSize(uint32(db.pageSize)) + m.SetFreelist(2) + m.SetRootBucket(common.NewInBucket(3, 0)) + m.SetPgid(4) + m.SetTxid(common.Txid(i)) + m.SetChecksum(m.Sum64()) } // Write an empty freelist at page 3. - p := db.pageInBuffer(buf, pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 + p := db.pageInBuffer(buf, common.Pgid(2)) + p.SetId(2) + p.SetFlags(common.FreelistPageFlag) + p.SetCount(0) // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf, pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 + p = db.pageInBuffer(buf, common.Pgid(3)) + p.SetId(3) + p.SetFlags(common.LeafPageFlag) + p.SetCount(0) // Write the buffer to our data file. if _, err := db.ops.writeAt(buf, 0); err != nil { + db.Logger().Errorf("writeAt failed: %w", err) return err } if err := fdatasync(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } - db.filesz = len(buf) return nil } @@ -716,13 +740,31 @@ func (db *DB) close() error { // // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { +func (db *DB) Begin(writable bool) (t *Tx, err error) { + if lg := db.Logger(); lg != discardLogger { + lg.Debugf("Starting a new transaction [writable: %t]", writable) + defer func() { + if err != nil { + lg.Errorf("Starting a new transaction [writable: %t] failed: %v", writable, err) + } else { + lg.Debugf("Starting a new transaction [writable: %t] successfully", writable) + } + }() + } + if writable { return db.beginRWTx() } return db.beginTx() } +func (db *DB) Logger() Logger { + if db == nil || db.logger == nil { + return getDiscardLogger() + } + return db.logger +} + func (db *DB) beginTx() (*Tx, error) { // Lock the meta pages while we initialize the transaction. We obtain // the meta lock before the mmap lock because that's the order that the @@ -738,14 +780,14 @@ func (db *DB) beginTx() (*Tx, error) { if !db.opened { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. @@ -755,6 +797,9 @@ func (db *DB) beginTx() (*Tx, error) { // Keep track of transaction until it closes. db.txs = append(db.txs, t) n := len(db.txs) + if db.freelist != nil { + db.freelist.AddReadonlyTXID(t.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -771,7 +816,7 @@ func (db *DB) beginTx() (*Tx, error) { func (db *DB) beginRWTx() (*Tx, error) { // If the database was opened with Options.ReadOnly, return an error. if db.readOnly { - return nil, ErrDatabaseReadOnly + return nil, berrors.ErrDatabaseReadOnly } // Obtain writer lock. This is released by the transaction when it closes. @@ -786,49 +831,23 @@ func (db *DB) beginRWTx() (*Tx, error) { // Exit if the database is not open yet. 
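db.init above lays out a fresh file as: pages 0 and 1 are meta pages, page 2 an empty freelist, page 3 an empty leaf (the root bucket). Since the 16-byte page header (id, flags, count, overflow) precedes the meta struct, the Magic marker starts at byte 16 of page 0. A quick sanity probe, as a sketch (it assumes the file was written by a little-endian host, which is how the struct bytes land on disk there):

package main

import (
	"encoding/binary"
	"fmt"
	"log"
	"os"
)

const magic uint32 = 0xED0CDAED // mirrors common.Magic

func main() {
	f, err := os.Open("my.db") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Read past the 16-byte page header; the meta struct starts with magic.
	var buf [20]byte
	if _, err := f.ReadAt(buf[:], 0); err != nil {
		log.Fatal(err)
	}
	got := binary.LittleEndian.Uint32(buf[16:20])
	fmt.Printf("magic ok: %v\n", got == magic)
}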
if !db.opened { db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.rwlock.Unlock() - return nil, ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. t := &Tx{writable: true} t.init(db) db.rwtx = t - db.freePages() + db.freelist.ReleasePendingPages() return t, nil } -// freePages releases any pages associated with closed read-only transactions. -func (db *DB) freePages() { - // Free all pending pages prior to earliest open transaction. - sort.Sort(txsById(db.txs)) - minid := txid(0xFFFFFFFFFFFFFFFF) - if len(db.txs) > 0 { - minid = db.txs[0].meta.txid - } - if minid > 0 { - db.freelist.release(minid - 1) - } - // Release unused txid extents. - for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.txid-1) - minid = t.meta.txid + 1 - } - db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) - // Any page both allocated and freed in an extent is safe to release. -} - -type txsById []*Tx - -func (t txsById) Len() int { return len(t) } -func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } - // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -848,6 +867,9 @@ func (db *DB) removeTx(tx *Tx) { } } n := len(db.txs) + if db.freelist != nil { + db.freelist.RemoveReadonlyTXID(tx.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -1056,7 +1078,20 @@ func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { // // This is not necessary under normal operation, however, if you use NoSync // then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } +func (db *DB) Sync() (err error) { + if lg := db.Logger(); lg != discardLogger { + lg.Debug("Syncing bbolt db (%s)", db.path) + defer func() { + if err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err) + } else { + lg.Debugf("Syncing bbolt db (%s) successfully", db.path) + } + }() + } + + return fdatasync(db) +} // Stats retrieves ongoing performance stats for the database. // This is only updated when a transaction closes. @@ -1069,37 +1104,37 @@ func (db *DB) Stats() Stats { // This is for internal access to the raw data bytes from the C cursor, use // carefully, or not at all. func (db *DB) Info() *Info { - _assert(db.data != nil, "database file isn't correctly mapped") + common.Assert(db.data != nil, "database file isn't correctly mapped") return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} } // page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) +func (db *DB) page(id common.Pgid) *common.Page { + pos := id * common.Pgid(db.pageSize) + return (*common.Page)(unsafe.Pointer(&db.data[pos])) } // pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +func (db *DB) pageInBuffer(b []byte, id common.Pgid) *common.Page { + return (*common.Page)(unsafe.Pointer(&b[id*common.Pgid(db.pageSize)])) } // meta retrieves the current meta page reference. 
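Begin keeps its public contract; the changes above only add logging and move pending-page release behind the freelist interface (ReleasePendingPages). Typical explicit-transaction usage, unaffected by this diff (placeholder names):

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	tx, err := db.Begin(true) // writable transaction
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback() // returns ErrTxClosed after Commit; error deliberately ignored

	b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
	if err != nil {
		log.Fatal(err)
	}
	if err := b.Put([]byte("k"), []byte("v")); err != nil {
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}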
-func (db *DB) meta() *meta { +func (db *DB) meta() *common.Meta { // We have to return the meta with the highest txid which doesn't fail // validation. Otherwise, we can cause errors when in fact the database is // in a consistent state. metaA is the one with the higher txid. metaA := db.meta0 metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { + if db.meta1.Txid() > db.meta0.Txid() { metaA = db.meta1 metaB = db.meta0 } // Use higher meta page if valid. Otherwise, fallback to previous, if valid. - if err := metaA.validate(); err == nil { + if err := metaA.Validate(); err == nil { return metaA - } else if err := metaB.validate(); err == nil { + } else if err := metaB.Validate(); err == nil { return metaB } @@ -1109,7 +1144,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(txid txid, count int) (*page, error) { +func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -1117,17 +1152,18 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } else { buf = make([]byte, count*db.pageSize) } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) + p := (*common.Page)(unsafe.Pointer(&buf[0])) + p.SetOverflow(uint32(count - 1)) // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(txid, count); p.id != 0 { + p.SetId(db.freelist.Allocate(txid, count)) + if p.Id() != 0 { return p, nil } // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize + p.SetId(db.rwtx.meta.Pgid()) + var minsz = int((p.Id()+common.Pgid(count))+1) * db.pageSize if minsz >= db.datasz { if err := db.mmap(minsz); err != nil { return nil, fmt.Errorf("mmap allocate error: %s", err) @@ -1135,7 +1171,8 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) + curPgid := db.rwtx.meta.Pgid() + db.rwtx.meta.SetPgid(curPgid + common.Pgid(count)) return p, nil } @@ -1143,7 +1180,13 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { // grow grows the size of the database to the given sz. func (db *DB) grow(sz int) error { // Ignore if the new size is less than available file size. 
- if sz <= db.filesz { + lg := db.Logger() + fileSize, err := db.fileSize() + if err != nil { + lg.Errorf("getting file size failed: %w", err) + return err + } + if sz <= fileSize { return nil } @@ -1162,21 +1205,22 @@ func (db *DB) grow(sz int) error { // gofail: var resizeFileError string // return errors.New(resizeFileError) if err := db.file.Truncate(int64(sz)); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] truncating file failed, size: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, sz, db.datasz, err) return fmt.Errorf("file resize error: %s", err) } } if err := db.file.Sync(); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing file failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("file sync error: %s", err) } if db.Mlock { // unlock old file and lock new one - if err := db.mrelock(db.filesz, sz); err != nil { + if err := db.mrelock(fileSize, sz); err != nil { return fmt.Errorf("mlock/munlock error: %s", err) } } } - db.filesz = sz return nil } @@ -1184,7 +1228,7 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } -func (db *DB) freepages() []pgid { +func (db *DB) freepages() []common.Pgid { tx, err := db.beginTx() defer func() { err = tx.Rollback() @@ -1196,21 +1240,21 @@ func (db *DB) freepages() []pgid { panic("freepages: failed to open read only tx") } - reachable := make(map[pgid]*page) - nofreed := make(map[pgid]bool) + reachable := make(map[common.Pgid]*common.Page) + nofreed := make(map[common.Pgid]bool) ech := make(chan error) go func() { for e := range ech { panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) } }() - tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) + tx.recursivelyCheckBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) close(ech) // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages. - var fids []pgid - for i := pgid(2); i < db.meta().pgid; i++ { + var fids []common.Pgid + for i := common.Pgid(2); i < db.meta().Pgid(); i++ { if _, ok := reachable[i]; !ok { fids = append(fids, i) } @@ -1218,11 +1262,17 @@ func (db *DB) freepages() []pgid { return fids } +func newFreelist(freelistType FreelistType) fl.Interface { + if freelistType == FreelistMapType { + return fl.NewHashMapFreelist() + } + return fl.NewArrayFreelist() +} + // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. + // When set to zero it will wait indefinitely. Timeout time.Duration // Sets the DB.NoGrowSync flag before memory mapping the file. @@ -1277,6 +1327,19 @@ type Options struct { // It prevents potential page faults, however // used memory can't be reclaimed. (UNIX only) Mlock bool + + // Logger is the logger used for bbolt. + Logger Logger +} + +func (o *Options) String() string { + if o == nil { + return "{}" + } + + return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p}", + o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger) + } // DefaultOptions represent the options used if nil options are passed into Open(). 
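With the new Logger field and String() helper above, Options remains a plain struct passed to Open. A sketch of opening with explicit options (the values are illustrative):

package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	opts := &bolt.Options{
		Timeout:      time.Second,          // fail instead of waiting forever on the file lock
		FreelistType: bolt.FreelistMapType, // hashmap-backed freelist
		ReadOnly:     false,
	}
	db, err := bolt.Open("my.db", 0600, opts) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}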
@@ -1327,65 +1390,3 @@ type Info struct { Data uintptr PageSize int } - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { - // TODO: reject pgidNoFreeList if !NoFreelistSync - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go index f2c3b20ed8b7e..02958c86f5df8 100644 --- a/vendor/go.etcd.io/bbolt/errors.go +++ b/vendor/go.etcd.io/bbolt/errors.go @@ -1,78 +1,108 @@ package bbolt -import "errors" +import "go.etcd.io/bbolt/errors" // These errors can be returned when opening or calling methods on a DB. var ( // ErrDatabaseNotOpen is returned when a DB instance is accessed before it // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseNotOpen = errors.ErrDatabaseNotOpen // ErrInvalid is returned when both meta pages on a database are invalid. // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalid = errors.ErrInvalid // ErrInvalidMapping is returned when the database file fails to get mapped. - ErrInvalidMapping = errors.New("database isn't correctly mapped") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalidMapping = errors.ErrInvalidMapping // ErrVersionMismatch is returned when the data file was created with a // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrVersionMismatch = errors.ErrVersionMismatch - // ErrChecksum is returned when either meta page checksum does not match. 
- ErrChecksum = errors.New("checksum error") + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrChecksum = errors.ErrChecksum // ErrTimeout is returned when a database cannot obtain an exclusive lock // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTimeout = errors.ErrTimeout ) // These errors can occur when beginning or committing a Tx. var ( // ErrTxNotWritable is returned when performing a write operation on a // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxNotWritable = errors.ErrTxNotWritable // ErrTxClosed is returned when committing or rolling back a transaction // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxClosed = errors.ErrTxClosed // ErrDatabaseReadOnly is returned when a mutating transaction is started on a // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseReadOnly = errors.ErrDatabaseReadOnly // ErrFreePagesNotLoaded is returned when a readonly transaction without // preloading the free pages is trying to access the free pages. - ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrFreePagesNotLoaded = errors.ErrFreePagesNotLoaded ) // These errors can occur when putting or deleting a value or a bucket. var ( // ErrBucketNotFound is returned when trying to access a bucket that has // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNotFound = errors.ErrBucketNotFound // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketExists = errors.ErrBucketExists // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNameRequired = errors.ErrBucketNameRequired // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyRequired = errors.ErrKeyRequired // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyTooLarge = errors.ErrKeyTooLarge // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. 
+ ErrValueTooLarge = errors.ErrValueTooLarge // ErrIncompatibleValue is returned when trying create or delete a bucket // on an existing non-bucket key or when trying to create or delete a // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrIncompatibleValue = errors.ErrIncompatibleValue ) diff --git a/vendor/go.etcd.io/bbolt/errors/errors.go b/vendor/go.etcd.io/bbolt/errors/errors.go new file mode 100644 index 0000000000000..c115289e56c23 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/errors/errors.go @@ -0,0 +1,84 @@ +// Package errors defines the error variables that may be returned +// during bbolt operations. +package errors + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrInvalidMapping is returned when the database file fails to get mapped. + ErrInvalidMapping = errors.New("database isn't correctly mapped") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") + + // ErrFreePagesNotLoaded is returned when a readonly transaction without + // preloading the free pages is trying to access the free pages. + ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. 
+ ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying to create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") + + // ErrSameBuckets is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are the same. + ErrSameBuckets = errors.New("the source and target are the same bucket") + + // ErrDifferentDB is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are in different database files. + ErrDifferentDB = errors.New("the source and target buckets are in different database files") +) diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go deleted file mode 100644 index dffc7bc749b52..0000000000000 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ /dev/null @@ -1,410 +0,0 @@ -package bbolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// txPending holds a list of pgids and corresponding allocation txns -// that are pending to be freed. -type txPending struct { - ids []pgid - alloctx []txid // txids allocating the ids - lastReleaseBegin txid // beginning txid of last matching releaseRange -} - -// pidSet holds the set of starting pgids which have the same span size -type pidSet map[pgid]struct{} - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - freelistType FreelistType // freelist type - ids []pgid // all free and available free page ids. - allocs map[pgid]txid // mapping of txid that allocated a pgid. - pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]struct{} // fast lookup of all free and pending page ids. - freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size - forwardMap map[pgid]uint64 // key is start pgid, value is its span size - backwardMap map[pgid]uint64 // key is end pgid, value is its span size - allocate func(txid txid, n int) pgid // the freelist allocate func - free_count func() int // the function which gives you free page number - mergeSpans func(ids pgids) // the mergeSpan func - getFreePageIDs func() []pgid // get free pgids func - readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist(freelistType FreelistType) *freelist { - f := &freelist{ - freelistType: freelistType, - allocs: make(map[pgid]txid), - pending: make(map[txid]*txPending), - cache: make(map[pgid]struct{}), - freemaps: make(map[uint64]pidSet), - forwardMap: make(map[pgid]uint64), - backwardMap: make(map[pgid]uint64), - } - - if freelistType == FreelistMapType { - f.allocate = f.hashmapAllocate - f.free_count = f.hashmapFreeCount - f.mergeSpans = f.hashmapMergeSpans - f.getFreePageIDs = f.hashmapGetFreePageIDs - f.readIDs = f.hashmapReadIDs - } else { - f.allocate = f.arrayAllocate - f.free_count = f.arrayFreeCount - f.mergeSpans = f.arrayMergeSpans - f.getFreePageIDs = f.arrayGetFreePageIDs - f.readIDs = f.arrayReadIDs - } - - return f -} - -// size returns the size of the page after serialization. 
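Because the top-level bbolt error variables are now deprecated aliases for the new go.etcd.io/bbolt/errors package (both names point at the same values), callers can migrate incrementally with no behavior change. A sketch:

package main

import (
	"errors"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
	berrors "go.etcd.io/bbolt/errors"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		if tx.Bucket([]byte("missing")) == nil {
			return berrors.ErrBucketNotFound
		}
		return nil
	})
	// The deprecated alias and the new variable are the same error value.
	fmt.Println(errors.Is(err, bolt.ErrBucketNotFound)) // true
}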
-func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. - n++ - } - return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// arrayFreeCount returns count of free pages(array version) -func (f *freelist) arrayFreeCount() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, txp := range f.pending { - count += len(txp.ids) - } - return count -} - -// copyall copies a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) - } - sort.Sort(m) - mergepgids(dst, f.getFreePageIDs(), m) -} - -// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) arrayAllocate(txid txid, n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - f.allocs[initial] = txid - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - txp := f.pending[txid] - if txp == nil { - txp = &txPending{} - f.pending[txid] = txp - } - allocTxid, ok := f.allocs[p.id] - if ok { - delete(f.allocs, p.id) - } else if (p.flags & freelistPageFlag) != 0 { - // Freelist is always allocated by prior tx. - allocTxid = txid - 1 - } - - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if _, ok := f.cache[id]; ok { - panic(fmt.Sprintf("page %d already freed", id)) - } - // Add to the freelist and cache. - txp.ids = append(txp.ids, id) - txp.alloctx = append(txp.alloctx, allocTxid) - f.cache[id] = struct{}{} - } -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, txp := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, txp.ids...) 
- delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end txid) { - if begin > end { - return - } - var m pgids - for tid, txp := range f.pending { - if tid < begin || tid > end { - continue - } - // Don't recompute freed pages if ranges haven't updated. - if txp.lastReleaseBegin == begin { - continue - } - for i := 0; i < len(txp.ids); i++ { - if atx := txp.alloctx[i]; atx < begin || atx > end { - continue - } - m = append(m, txp.ids[i]) - txp.ids[i] = txp.ids[len(txp.ids)-1] - txp.ids = txp.ids[:len(txp.ids)-1] - txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] - txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] - i-- - } - txp.lastReleaseBegin = begin - if len(txp.ids) == 0 { - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - txp := f.pending[txid] - if txp == nil { - return - } - var m pgids - for i, pgid := range txp.ids { - delete(f.cache, pgid) - tx := txp.alloctx[i] - if tx == 0 { - continue - } - if tx != txid { - // Pending free aborted; restore page back to alloc list. - f.allocs[pgid] = tx - } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) - } - } - // Remove pages from pending list and mark as free if allocated by txid. - delete(f.pending, txid) - - // Remove pgids which are allocated by this txid - for pgid, tid := range f.allocs { - if tid == txid { - delete(f.allocs, pgid) - } - } - - f.mergeSpans(m) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgId pgid) bool { - _, ok := f.cache[pgId] - return ok -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - if (p.flags & freelistPageFlag) == 0 { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) - } - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - var idx, count = 0, int(p.count) - if count == 0xFFFF { - idx = 1 - c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) - count = int(c) - if count < 0 { - panic(fmt.Sprintf("leading element count %d overflows int", c)) - } - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(pgid(0)), idx) - ids := unsafe.Slice((*pgid)(data), count) - - // copy the ids, so we don't modify on the freelist page directly - idsCopy := make([]pgid, count) - copy(idsCopy, ids) - // Make sure they're sorted. - sort.Sort(pgids(idsCopy)) - - f.readIDs(idsCopy) - } -} - -// arrayReadIDs initializes the freelist from a given list of ids. -func (f *freelist) arrayReadIDs(ids []pgid) { - f.ids = ids - f.reindex() -} - -func (f *freelist) arrayGetFreePageIDs() []pgid { - return f.ids -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. 
- p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - l := f.count() - if l == 0 { - p.count = uint16(l) - } else if l < 0xFFFF { - p.count = uint16(l) - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*pgid)(data), l) - f.copyall(ids) - } else { - p.count = 0xFFFF - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*pgid)(data), l+1) - ids[0] = pgid(l) - f.copyall(ids[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.getFreePageIDs() { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// noSyncReload reads the freelist from pgids and filters out pending items. -func (f *freelist) noSyncReload(pgids []pgid) { - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range pgids { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - ids := f.getFreePageIDs() - f.cache = make(map[pgid]struct{}, len(ids)) - for _, id := range ids { - f.cache[id] = struct{}{} - } - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - f.cache[pendingID] = struct{}{} - } - } -} - -// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array -func (f *freelist) arrayMergeSpans(ids pgids) { - sort.Sort(ids) - f.ids = pgids(f.ids).merge(ids) -} diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go deleted file mode 100644 index dbd67a1e7361a..0000000000000 --- a/vendor/go.etcd.io/bbolt/freelist_hmap.go +++ /dev/null @@ -1,178 +0,0 @@ -package bbolt - -import "sort" - -// hashmapFreeCount returns count of free pages(hashmap version) -func (f *freelist) hashmapFreeCount() int { - // use the forwardMap to get the total count - count := 0 - for _, size := range f.forwardMap { - count += int(size) - } - return count -} - -// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend -func (f *freelist) hashmapAllocate(txid txid, n int) pgid { - if n == 0 { - return 0 - } - - // if we have a exact size match just return short path - if bm, ok := f.freemaps[uint64(n)]; ok { - for pid := range bm { - // remove the span - f.delSpan(pid, uint64(n)) - - f.allocs[pid] = txid - - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - // lookup the map to find larger span - for size, bm := range f.freemaps { - if size < uint64(n) { - continue - } - - for pid := range bm { - // remove the initial - f.delSpan(pid, size) - - f.allocs[pid] = txid - - remain := size - uint64(n) - - // add remain span - f.addSpan(pid+pgid(n), remain) - - 
for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - return 0 -} - -// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) -func (f *freelist) hashmapReadIDs(pgids []pgid) { - f.init(pgids) - - // Rebuild the page cache. - f.reindex() -} - -// hashmapGetFreePageIDs returns the sorted free page ids -func (f *freelist) hashmapGetFreePageIDs() []pgid { - count := f.free_count() - if count == 0 { - return nil - } - - m := make([]pgid, 0, count) - for start, size := range f.forwardMap { - for i := 0; i < int(size); i++ { - m = append(m, start+pgid(i)) - } - } - sort.Sort(pgids(m)) - - return m -} - -// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans -func (f *freelist) hashmapMergeSpans(ids pgids) { - for _, id := range ids { - // try to see if we can merge and update - f.mergeWithExistingSpan(id) - } -} - -// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward -func (f *freelist) mergeWithExistingSpan(pid pgid) { - prev := pid - 1 - next := pid + 1 - - preSize, mergeWithPrev := f.backwardMap[prev] - nextSize, mergeWithNext := f.forwardMap[next] - newStart := pid - newSize := uint64(1) - - if mergeWithPrev { - //merge with previous span - start := prev + 1 - pgid(preSize) - f.delSpan(start, preSize) - - newStart -= pgid(preSize) - newSize += preSize - } - - if mergeWithNext { - // merge with next span - f.delSpan(next, nextSize) - newSize += nextSize - } - - f.addSpan(newStart, newSize) -} - -func (f *freelist) addSpan(start pgid, size uint64) { - f.backwardMap[start-1+pgid(size)] = size - f.forwardMap[start] = size - if _, ok := f.freemaps[size]; !ok { - f.freemaps[size] = make(map[pgid]struct{}) - } - - f.freemaps[size][start] = struct{}{} -} - -func (f *freelist) delSpan(start pgid, size uint64) { - delete(f.forwardMap, start) - delete(f.backwardMap, start+pgid(size-1)) - delete(f.freemaps[size], start) - if len(f.freemaps[size]) == 0 { - delete(f.freemaps, size) - } -} - -// initial from pgids using when use hashmap version -// pgids must be sorted -func (f *freelist) init(pgids []pgid) { - if len(pgids) == 0 { - return - } - - size := uint64(1) - start := pgids[0] - - if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { - panic("pgids not sorted") - } - - f.freemaps = make(map[uint64]pidSet) - f.forwardMap = make(map[pgid]uint64) - f.backwardMap = make(map[pgid]uint64) - - for i := 1; i < len(pgids); i++ { - // continuous page - if pgids[i] == pgids[i-1]+1 { - size++ - } else { - f.addSpan(start, size) - - size = 1 - start = pgids[i] - } - } - - // init the tail - if size != 0 && start != 0 { - f.addSpan(start, size) - } -} diff --git a/vendor/go.etcd.io/bbolt/internal/common/bucket.go b/vendor/go.etcd.io/bbolt/internal/common/bucket.go new file mode 100644 index 0000000000000..2b4ab1453a1e0 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bucket.go @@ -0,0 +1,54 @@ +package common + +import ( + "fmt" + "unsafe" +) + +const BucketHeaderSize = int(unsafe.Sizeof(InBucket{})) + +// InBucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. 
+type InBucket struct {
+	root     Pgid   // page id of the bucket's root-level page
+	sequence uint64 // monotonically incrementing, used by NextSequence()
+}
+
+func NewInBucket(root Pgid, seq uint64) InBucket {
+	return InBucket{
+		root:     root,
+		sequence: seq,
+	}
+}
+
+func (b *InBucket) RootPage() Pgid {
+	return b.root
+}
+
+func (b *InBucket) SetRootPage(id Pgid) {
+	b.root = id
+}
+
+// InSequence returns the sequence. The reason why not naming it `Sequence`
+// is to avoid duplicated name as `(*Bucket) Sequence()`
+func (b *InBucket) InSequence() uint64 {
+	return b.sequence
+}
+
+func (b *InBucket) SetInSequence(v uint64) {
+	b.sequence = v
+}
+
+func (b *InBucket) IncSequence() {
+	b.sequence++
+}
+
+func (b *InBucket) InlinePage(v []byte) *Page {
+	return (*Page)(unsafe.Pointer(&v[BucketHeaderSize]))
+}
+
+func (b *InBucket) String() string {
+	return fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/common/inode.go b/vendor/go.etcd.io/bbolt/internal/common/inode.go
new file mode 100644
index 0000000000000..080b9af789dae
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/inode.go
@@ -0,0 +1,115 @@
+package common
+
+import "unsafe"
+
+// Inode represents an internal node inside of a node.
+// It can be used to point to elements in a page or point
+// to an element which hasn't been added to a page yet.
+type Inode struct {
+	flags uint32
+	pgid  Pgid
+	key   []byte
+	value []byte
+}
+
+type Inodes []Inode
+
+func (in *Inode) Flags() uint32 {
+	return in.flags
+}
+
+func (in *Inode) SetFlags(flags uint32) {
+	in.flags = flags
+}
+
+func (in *Inode) Pgid() Pgid {
+	return in.pgid
+}
+
+func (in *Inode) SetPgid(id Pgid) {
+	in.pgid = id
+}
+
+func (in *Inode) Key() []byte {
+	return in.key
+}
+
+func (in *Inode) SetKey(key []byte) {
+	in.key = key
+}
+
+func (in *Inode) Value() []byte {
+	return in.value
+}
+
+func (in *Inode) SetValue(value []byte) {
+	in.value = value
+}
+
+func ReadInodeFromPage(p *Page) Inodes {
+	inodes := make(Inodes, int(p.Count()))
+	isLeaf := p.IsLeafPage()
+	for i := 0; i < int(p.Count()); i++ {
+		inode := &inodes[i]
+		if isLeaf {
+			elem := p.LeafPageElement(uint16(i))
+			inode.SetFlags(elem.Flags())
+			inode.SetKey(elem.Key())
+			inode.SetValue(elem.Value())
+		} else {
+			elem := p.BranchPageElement(uint16(i))
+			inode.SetPgid(elem.Pgid())
+			inode.SetKey(elem.Key())
+		}
+		Assert(len(inode.Key()) > 0, "read: zero-length inode key")
+	}
+
+	return inodes
+}
+
+func WriteInodeToPage(inodes Inodes, p *Page) uint32 {
+	// Loop over each item and write it to the page.
+	// off tracks the offset into p of the start of the next data.
+	off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
+	isLeaf := p.IsLeafPage()
+	for i, item := range inodes {
+		Assert(len(item.Key()) > 0, "write: zero-length inode key")
+
+		// Create a slice to write into of needed size and advance
+		// byte pointer for next iteration.
+		sz := len(item.Key()) + len(item.Value())
+		b := UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
+		off += uintptr(sz)
+
+		// Write the page element.
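The `InBucket` header above is what a parent bucket stores as the value for a child bucket's key; when the child is small enough to be stored inline, `root` is 0 and a serialized page image follows the 16-byte header. A minimal sketch of decoding such a value, written as if inside `package common` (`decodeBucket` is a hypothetical helper, not part of this diff):

// Hypothetical helper: decode a bucket value taken from a leaf
// element that has BucketLeafFlag set.
func decodeBucket(val []byte) (*InBucket, *Page) {
	b := LoadBucket(val) // the InBucket header occupies the first BucketHeaderSize bytes
	if b.RootPage() == 0 {
		// Inline bucket: the serialized root page follows the header.
		return b, b.InlinePage(val)
	}
	// Regular bucket: its contents live at page b.RootPage().
	return b, nil
}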
+ if isLeaf { + elem := p.LeafPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetFlags(item.Flags()) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetVsize(uint32(len(item.Value()))) + } else { + elem := p.BranchPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetPgid(item.Pgid()) + Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred") + } + + // Write data for the element to the end of the page. + l := copy(b, item.Key()) + copy(b[l:], item.Value()) + } + + return uint32(off) +} + +func UsedSpaceInPage(inodes Inodes, p *Page) uint32 { + off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes)) + for _, item := range inodes { + sz := len(item.Key()) + len(item.Value()) + off += uintptr(sz) + } + + return uint32(off) +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/meta.go b/vendor/go.etcd.io/bbolt/internal/common/meta.go new file mode 100644 index 0000000000000..055388604af81 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/meta.go @@ -0,0 +1,161 @@ +package common + +import ( + "fmt" + "hash/fnv" + "io" + "unsafe" + + "go.etcd.io/bbolt/errors" +) + +type Meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root InBucket + freelist Pgid + pgid Pgid + txid Txid + checksum uint64 +} + +// Validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *Meta) Validate() error { + if m.magic != Magic { + return errors.ErrInvalid + } else if m.version != Version { + return errors.ErrVersionMismatch + } else if m.checksum != m.Sum64() { + return errors.ErrChecksum + } + return nil +} + +// Copy copies one meta object to another. +func (m *Meta) Copy(dest *Meta) { + *dest = *m +} + +// Write writes the meta onto a page. +func (m *Meta) Write(p *Page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != PgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = Pgid(m.txid % 2) + p.SetFlags(MetaPageFlag) + + // Calculate the checksum. + m.checksum = m.Sum64() + + m.Copy(p.Meta()) +} + +// Sum64 generates the checksum for the meta. 
+func (m *Meta) Sum64() uint64 {
+	var h = fnv.New64a()
+	_, _ = h.Write((*[unsafe.Offsetof(Meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+	return h.Sum64()
+}
+
+func (m *Meta) Magic() uint32 {
+	return m.magic
+}
+
+func (m *Meta) SetMagic(v uint32) {
+	m.magic = v
+}
+
+func (m *Meta) Version() uint32 {
+	return m.version
+}
+
+func (m *Meta) SetVersion(v uint32) {
+	m.version = v
+}
+
+func (m *Meta) PageSize() uint32 {
+	return m.pageSize
+}
+
+func (m *Meta) SetPageSize(v uint32) {
+	m.pageSize = v
+}
+
+func (m *Meta) Flags() uint32 {
+	return m.flags
+}
+
+func (m *Meta) SetFlags(v uint32) {
+	m.flags = v
+}
+
+func (m *Meta) SetRootBucket(b InBucket) {
+	m.root = b
+}
+
+func (m *Meta) RootBucket() *InBucket {
+	return &m.root
+}
+
+func (m *Meta) Freelist() Pgid {
+	return m.freelist
+}
+
+func (m *Meta) SetFreelist(v Pgid) {
+	m.freelist = v
+}
+
+func (m *Meta) IsFreelistPersisted() bool {
+	return m.freelist != PgidNoFreelist
+}
+
+func (m *Meta) Pgid() Pgid {
+	return m.pgid
+}
+
+func (m *Meta) SetPgid(id Pgid) {
+	m.pgid = id
+}
+
+func (m *Meta) Txid() Txid {
+	return m.txid
+}
+
+func (m *Meta) SetTxid(id Txid) {
+	m.txid = id
+}
+
+func (m *Meta) IncTxid() {
+	m.txid += 1
+}
+
+func (m *Meta) DecTxid() {
+	m.txid -= 1
+}
+
+func (m *Meta) Checksum() uint64 {
+	return m.checksum
+}
+
+func (m *Meta) SetChecksum(v uint64) {
+	m.checksum = v
+}
+
+func (m *Meta) Print(w io.Writer) {
+	fmt.Fprintf(w, "Version: %d\n", m.version)
+	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
+	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
+	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
+	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
+	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
+	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
+	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
+	fmt.Fprintf(w, "\n")
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/common/page.go b/vendor/go.etcd.io/bbolt/internal/common/page.go
new file mode 100644
index 0000000000000..ee808967c5731
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/page.go
@@ -0,0 +1,391 @@
+package common
+
+import (
+	"fmt"
+	"os"
+	"sort"
+	"unsafe"
+)
+
+const PageHeaderSize = unsafe.Sizeof(Page{})
+
+const MinKeysPerPage = 2
+
+const BranchPageElementSize = unsafe.Sizeof(branchPageElement{})
+const LeafPageElementSize = unsafe.Sizeof(leafPageElement{})
+const pgidSize = unsafe.Sizeof(Pgid(0))
+
+const (
+	BranchPageFlag = 0x01
+	LeafPageFlag = 0x02
+	MetaPageFlag = 0x04
+	FreelistPageFlag = 0x10
+)
+
+const (
+	BucketLeafFlag = 0x01
+)
+
+type Pgid uint64
+
+type Page struct {
+	id Pgid
+	flags uint16
+	count uint16
+	overflow uint32
+}
+
+func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page {
+	return &Page{
+		id: id,
+		flags: flags,
+		count: count,
+		overflow: overflow,
+	}
+}
+
+// Typ returns a human-readable page type string used for debugging.
+func (p *Page) Typ() string {
+	if p.IsBranchPage() {
+		return "branch"
+	} else if p.IsLeafPage() {
+		return "leaf"
+	} else if p.IsMetaPage() {
+		return "meta"
+	} else if p.IsFreelistPage() {
+		return "freelist"
+	}
+	return fmt.Sprintf("unknown<%02x>", p.flags)
+}
+
+func (p *Page) IsBranchPage() bool {
+	return p.flags == BranchPageFlag
+}
+
+func (p *Page) IsLeafPage() bool {
+	return p.flags == LeafPageFlag
+}
+
+func (p *Page) IsMetaPage() bool {
+	return p.flags == MetaPageFlag
+}
+
+func (p *Page) IsFreelistPage() bool {
+	return p.flags == FreelistPageFlag
+}
+
+// Meta returns a pointer to the metadata section of the page.
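`Sum64` above hashes every byte of `Meta` up to, but excluding, the trailing `checksum` field, which is how `Validate` detects a torn or corrupt meta write. A short sketch of checking a raw meta page read from disk, as if inside `package common` (`validateMetaPage` is hypothetical; `LoadPageMeta` appears in `utils.go` below):

// Hypothetical helper: validate the meta stored in a raw page buffer.
func validateMetaPage(buf []byte) error {
	m := LoadPageMeta(buf) // the Meta sits immediately after the page header
	return m.Validate()    // checks magic, then version, then checksum == Sum64()
}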
+func (p *Page) Meta() *Meta { + return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) +} + +func (p *Page) FastCheck(id Pgid) { + Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) + // Only one flag of page-type can be set. + Assert(p.IsBranchPage() || + p.IsLeafPage() || + p.IsMetaPage() || + p.IsFreelistPage(), + "page %v: has unexpected type/flags: %x", p.id, p.flags) +} + +// LeafPageElement retrieves the leaf node by index +func (p *Page) LeafPageElement(index uint16) *leafPageElement { + return (*leafPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + LeafPageElementSize, int(index))) +} + +// LeafPageElements retrieves a list of leaf nodes. +func (p *Page) LeafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) + return elems +} + +// BranchPageElement retrieves the branch node by index +func (p *Page) BranchPageElement(index uint16) *branchPageElement { + return (*branchPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + unsafe.Sizeof(branchPageElement{}), int(index))) +} + +// BranchPageElements retrieves a list of branch nodes. +func (p *Page) BranchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) + return elems +} + +func (p *Page) FreelistPageCount() (int, int) { + Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags)) + + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + var idx, count = 0, int(p.count) + if count == 0xFFFF { + idx = 1 + c := *(*Pgid)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) + count = int(c) + if count < 0 { + panic(fmt.Sprintf("leading element count %d overflows int", c)) + } + } + + return idx, count +} + +func (p *Page) FreelistPageIds() []Pgid { + Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags)) + + idx, count := p.FreelistPageCount() + + if count == 0 { + return nil + } + + data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), pgidSize, idx) + ids := unsafe.Slice((*Pgid)(data), count) + + return ids +} + +// dump writes n bytes of the page to STDERR as hex output. 
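`FreelistPageCount` above implements the `uint16` overflow convention: a header count of 0xFFFF means the real count is stored in the first `Pgid` slot and the ids start at index 1. The rule restated in isolation (hypothetical standalone function, not part of the diff):

// Hypothetical restatement of the overflow rule in FreelistPageCount.
func decodeFreelistCount(headerCount uint16, firstElem uint64) (startIdx, n int) {
	if headerCount == 0xFFFF {
		return 1, int(firstElem) // real count spilled into ids[0]
	}
	return 0, int(headerCount) // header count is authoritative
}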
+func (p *Page) hexdump(n int) { + buf := UnsafeByteSlice(unsafe.Pointer(p), 0, 0, n) + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +func (p *Page) PageElementSize() uintptr { + if p.IsLeafPage() { + return LeafPageElementSize + } + return BranchPageElementSize +} + +func (p *Page) Id() Pgid { + return p.id +} + +func (p *Page) SetId(target Pgid) { + p.id = target +} + +func (p *Page) Flags() uint16 { + return p.flags +} + +func (p *Page) SetFlags(v uint16) { + p.flags = v +} + +func (p *Page) Count() uint16 { + return p.count +} + +func (p *Page) SetCount(target uint16) { + p.count = target +} + +func (p *Page) Overflow() uint32 { + return p.overflow +} + +func (p *Page) SetOverflow(target uint32) { + p.overflow = target +} + +func (p *Page) String() string { + return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Typ(), p.count, p.overflow) +} + +type Pages []*Page + +func (s Pages) Len() int { return len(s) } +func (s Pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid Pgid +} + +func (n *branchPageElement) Pos() uint32 { + return n.pos +} + +func (n *branchPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *branchPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *branchPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *branchPageElement) Pgid() Pgid { + return n.pgid +} + +func (n *branchPageElement) SetPgid(v Pgid) { + n.pgid = v +} + +// Key returns a byte slice of the node key. +func (n *branchPageElement) Key() []byte { + return UnsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +func NewLeafPageElement(flags, pos, ksize, vsize uint32) *leafPageElement { + return &leafPageElement{ + flags: flags, + pos: pos, + ksize: ksize, + vsize: vsize, + } +} + +func (n *leafPageElement) Flags() uint32 { + return n.flags +} + +func (n *leafPageElement) SetFlags(v uint32) { + n.flags = v +} + +func (n *leafPageElement) Pos() uint32 { + return n.pos +} + +func (n *leafPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *leafPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *leafPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *leafPageElement) Vsize() uint32 { + return n.vsize +} + +func (n *leafPageElement) SetVsize(v uint32) { + n.vsize = v +} + +// Key returns a byte slice of the node key. +func (n *leafPageElement) Key() []byte { + i := int(n.pos) + j := i + int(n.ksize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// Value returns a byte slice of the node value. +func (n *leafPageElement) Value() []byte { + i := int(n.pos) + int(n.ksize) + j := i + int(n.vsize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +func (n *leafPageElement) IsBucketEntry() bool { + return n.flags&uint32(BucketLeafFlag) != 0 +} + +func (n *leafPageElement) Bucket() *InBucket { + if n.IsBucketEntry() { + return LoadBucket(n.Value()) + } else { + return nil + } +} + +// PageInfo represents human readable information about a page. 
+type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type Pgids []Pgid + +func (s Pgids) Len() int { return len(s) } +func (s Pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pgids) Less(i, j int) bool { return s[i] < s[j] } + +// Merge returns the sorted union of a and b. +func (a Pgids) Merge(b Pgids) Pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(Pgids, len(a)+len(b)) + Mergepgids(merged, a, b) + return merged +} + +// Mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func Mergepgids(dst, a, b Pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/types.go b/vendor/go.etcd.io/bbolt/internal/common/types.go new file mode 100644 index 0000000000000..8ad8279a090a6 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/types.go @@ -0,0 +1,40 @@ +package common + +import ( + "os" + "runtime" + "time" +) + +// MaxMmapStep is the largest step that can be taken when remapping the mmap. +const MaxMmapStep = 1 << 30 // 1GB + +// Version represents the data file format version. +const Version uint32 = 2 + +// Magic represents a marker value to indicate that a file is a Bolt DB. +const Magic uint32 = 0xED0CDAED + +const PgidNoFreelist Pgid = 0xffffffffffffffff + +// DO NOT EDIT. Copied from the "bolt" package. +const pageMaxAllocSize = 0xFFFFFFF + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// DefaultPageSize is the default page size for db which is set to the OS page size. +var DefaultPageSize = os.Getpagesize() + +// Txid represents the internal transaction identifier. 
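`Merge` and `Mergepgids` above assume both inputs are already sorted and produce a sorted union without re-sorting. A usage sketch, as if inside `package common` (`mergeExample` is hypothetical):

// Hypothetical example: merging two sorted pgid lists.
func mergeExample() Pgids {
	a := Pgids{3, 4, 9}
	b := Pgids{2, 7}
	return a.Merge(b) // Pgids{2, 3, 4, 7, 9}
}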
+type Txid uint64 diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go similarity index 74% rename from vendor/go.etcd.io/bbolt/unsafe.go rename to vendor/go.etcd.io/bbolt/internal/common/unsafe.go index 7745d32ce194f..9b77dd7b2b807 100644 --- a/vendor/go.etcd.io/bbolt/unsafe.go +++ b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go @@ -1,18 +1,18 @@ -package bbolt +package common import ( "unsafe" ) -func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { +func UnsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(base) + offset) } -func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { +func UnsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) } -func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { +func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices // // This memory is not allocated from C, but it is unmanaged by Go's @@ -23,5 +23,5 @@ func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // index 0. However, the wiki never says that the address must be to // the beginning of a C allocation (or even that malloc was used at // all), so this is believed to be correct. - return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] + return (*[pageMaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j] } diff --git a/vendor/go.etcd.io/bbolt/internal/common/utils.go b/vendor/go.etcd.io/bbolt/internal/common/utils.go new file mode 100644 index 0000000000000..bdf82a7b00bc3 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/utils.go @@ -0,0 +1,64 @@ +package common + +import ( + "fmt" + "io" + "os" + "unsafe" +) + +func LoadBucket(buf []byte) *InBucket { + return (*InBucket)(unsafe.Pointer(&buf[0])) +} + +func LoadPage(buf []byte) *Page { + return (*Page)(unsafe.Pointer(&buf[0])) +} + +func LoadPageMeta(buf []byte) *Meta { + return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize])) +} + +func CopyFile(srcPath, dstPath string) error { + // Ensure source file exists. + _, err := os.Stat(srcPath) + if os.IsNotExist(err) { + return fmt.Errorf("source file %q not found", srcPath) + } else if err != nil { + return err + } + + // Ensure output file not exist. 
+ _, err = os.Stat(dstPath) + if err == nil { + return fmt.Errorf("output file %q already exists", dstPath) + } else if !os.IsNotExist(err) { + return err + } + + srcDB, err := os.Open(srcPath) + if err != nil { + return fmt.Errorf("failed to open source file %q: %w", srcPath, err) + } + defer srcDB.Close() + dstDB, err := os.Create(dstPath) + if err != nil { + return fmt.Errorf("failed to create output file %q: %w", dstPath, err) + } + defer dstDB.Close() + written, err := io.Copy(dstDB, srcDB) + if err != nil { + return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err) + } + + srcFi, err := srcDB.Stat() + if err != nil { + return fmt.Errorf("failed to get source file info %q: %w", srcPath, err) + } + initialSize := srcFi.Size() + if initialSize != written { + return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize) + } + + return nil +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/verify.go b/vendor/go.etcd.io/bbolt/internal/common/verify.go new file mode 100644 index 0000000000000..eac95e26301bc --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/verify.go @@ -0,0 +1,67 @@ +// Copied from https://github.com/etcd-io/etcd/blob/main/client/pkg/verify/verify.go +package common + +import ( + "fmt" + "os" + "strings" +) + +const ENV_VERIFY = "BBOLT_VERIFY" + +type VerificationType string + +const ( + ENV_VERIFY_VALUE_ALL VerificationType = "all" + ENV_VERIFY_VALUE_ASSERT VerificationType = "assert" +) + +func getEnvVerify() string { + return strings.ToLower(os.Getenv(ENV_VERIFY)) +} + +func IsVerificationEnabled(verification VerificationType) bool { + env := getEnvVerify() + return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification)) +} + +// EnableVerifications sets `ENV_VERIFY` and returns a function that +// can be used to bring the original settings. +func EnableVerifications(verification VerificationType) func() { + previousEnv := getEnvVerify() + os.Setenv(ENV_VERIFY, string(verification)) + return func() { + os.Setenv(ENV_VERIFY, previousEnv) + } +} + +// EnableAllVerifications enables verification and returns a function +// that can be used to bring the original settings. +func EnableAllVerifications() func() { + return EnableVerifications(ENV_VERIFY_VALUE_ALL) +} + +// DisableVerifications unsets `ENV_VERIFY` and returns a function that +// can be used to bring the original settings. +func DisableVerifications() func() { + previousEnv := getEnvVerify() + os.Unsetenv(ENV_VERIFY) + return func() { + os.Setenv(ENV_VERIFY, previousEnv) + } +} + +// Verify performs verification if the assertions are enabled. +// In the default setup running in tests and skipped in the production code. +func Verify(f func()) { + if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) { + f() + } +} + +// Assert will panic with a given formatted message if the given condition is false. +func Assert(condition bool, msg string, v ...any) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/array.go b/vendor/go.etcd.io/bbolt/internal/freelist/array.go new file mode 100644 index 0000000000000..0cc1ba7150311 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/array.go @@ -0,0 +1,108 @@ +package freelist + +import ( + "fmt" + "sort" + + "go.etcd.io/bbolt/internal/common" +) + +type array struct { + *shared + + ids []common.Pgid // all free and available free page ids. 
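The verification hooks above are gated by the `BBOLT_VERIFY` environment variable, so the expensive invariant checks cost nothing in production builds. A sketch of turning them on for a single test, as if inside `package common` (the test itself is hypothetical):

import "testing"

// Hypothetical test showing the verification toggle.
func TestWithVerification(t *testing.T) {
	revert := EnableAllVerifications() // sets BBOLT_VERIFY=all
	defer revert()                     // restores the previous value

	Verify(func() {
		// expensive consistency checks run only while verification is enabled
	})
}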
+} + +func (f *array) Init(ids common.Pgids) { + f.ids = ids + f.reindex() +} + +func (f *array) Allocate(txid common.Txid, n int) common.Pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd common.Pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == common.Pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +func (f *array) FreeCount() int { + return len(f.ids) +} + +func (f *array) freePageIds() common.Pgids { + return f.ids +} + +func (f *array) mergeSpans(ids common.Pgids) { + sort.Sort(ids) + common.Verify(func() { + idsIdx := make(map[common.Pgid]struct{}) + for _, id := range f.ids { + // The existing f.ids shouldn't have duplicated free ID. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids)) + } + idsIdx[id] = struct{}{} + } + + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. Note page 0 and 1 + // are reserved for meta pages, so they can never be free page IDs. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.ids. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids)) + } + } + }) + f.ids = common.Pgids(f.ids).Merge(ids) +} + +func NewArrayFreelist() Interface { + a := &array{ + shared: newShared(), + ids: []common.Pgid{}, + } + a.Interface = a + return a +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go new file mode 100644 index 0000000000000..2b819506bda64 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go @@ -0,0 +1,82 @@ +package freelist + +import ( + "go.etcd.io/bbolt/internal/common" +) + +type ReadWriter interface { + // Read calls Init with the page ids stored in the given page. + Read(page *common.Page) + + // Write writes the freelist into the given page. + Write(page *common.Page) + + // EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write. + // This should never underestimate the size. + EstimatedWritePageSize() int +} + +type Interface interface { + ReadWriter + + // Init initializes this freelist with the given list of pages. + Init(ids common.Pgids) + + // Allocate tries to allocate the given number of contiguous pages + // from the free list pages. It returns the starting page ID if + // available; otherwise, it returns 0. + Allocate(txid common.Txid, numPages int) common.Pgid + + // Count returns the number of free and pending pages. + Count() int + + // FreeCount returns the number of free pages. 
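The array-backed `Allocate` above scans the sorted id slice for a run of `n` consecutive pages. A sketch of the observable behavior, as if inside `package freelist` (`allocateExample` is hypothetical):

// Hypothetical example: with free ids {3,4,5,9}, a request for 2
// contiguous pages returns 3 and leaves {5,9} on the free list.
func allocateExample() common.Pgid {
	f := NewArrayFreelist()
	f.Init(common.Pgids{3, 4, 5, 9})
	return f.Allocate(common.Txid(10), 2) // == 3
}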
+ FreeCount() int + + // PendingCount returns the number of pending pages. + PendingCount() int + + // AddReadonlyTXID adds a given read-only transaction id for pending page tracking. + AddReadonlyTXID(txid common.Txid) + + // RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking. + RemoveReadonlyTXID(txid common.Txid) + + // ReleasePendingPages releases any pages associated with closed read-only transactions. + ReleasePendingPages() + + // Free releases a page and its overflow for a given transaction id. + // If the page is already free or is one of the meta pages, then a panic will occur. + Free(txId common.Txid, p *common.Page) + + // Freed returns whether a given page is in the free list. + Freed(pgId common.Pgid) bool + + // Rollback removes the pages from a given pending tx. + Rollback(txId common.Txid) + + // Copyall copies a list of all free ids and all pending ids in one sorted list. + // f.count returns the minimum length required for dst. + Copyall(dst []common.Pgid) + + // Reload reads the freelist from a page and filters out pending items. + Reload(p *common.Page) + + // NoSyncReload reads the freelist from Pgids and filters out pending items. + NoSyncReload(pgIds common.Pgids) + + // freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available. + freePageIds() common.Pgids + + // pendingPageIds returns all pending pages by transaction id. + pendingPageIds() map[common.Txid]*txPending + + // release moves all page ids for a transaction id (or older) to the freelist. + release(txId common.Txid) + + // releaseRange moves pending pages allocated within an extent [begin,end] to the free list. + releaseRange(begin, end common.Txid) + + // mergeSpans is merging the given pages into the freelist + mergeSpans(ids common.Pgids) +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go new file mode 100644 index 0000000000000..8d471f4b5bf1a --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go @@ -0,0 +1,292 @@ +package freelist + +import ( + "fmt" + "reflect" + "sort" + + "go.etcd.io/bbolt/internal/common" +) + +// pidSet holds the set of starting pgids which have the same span size +type pidSet map[common.Pgid]struct{} + +type hashMap struct { + *shared + + freePagesCount uint64 // count of free pages(hashmap version) + freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size + forwardMap map[common.Pgid]uint64 // key is start pgid, value is its span size + backwardMap map[common.Pgid]uint64 // key is end pgid, value is its span size +} + +func (f *hashMap) Init(pgids common.Pgids) { + // reset the counter when freelist init + f.freePagesCount = 0 + f.freemaps = make(map[uint64]pidSet) + f.forwardMap = make(map[common.Pgid]uint64) + f.backwardMap = make(map[common.Pgid]uint64) + + if len(pgids) == 0 { + return + } + + if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { + panic("pgids not sorted") + } + + size := uint64(1) + start := pgids[0] + + for i := 1; i < len(pgids); i++ { + // continuous page + if pgids[i] == pgids[i-1]+1 { + size++ + } else { + f.addSpan(start, size) + + size = 1 + start = pgids[i] + } + } + + // init the tail + if size != 0 && start != 0 { + f.addSpan(start, size) + } + + f.reindex() +} + +func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid { + if n == 0 { + return 0 + } 
+ + // if we have a exact size match just return short path + if bm, ok := f.freemaps[uint64(n)]; ok { + for pid := range bm { + // remove the span + f.delSpan(pid, uint64(n)) + + f.allocs[pid] = txid + + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + // lookup the map to find larger span + for size, bm := range f.freemaps { + if size < uint64(n) { + continue + } + + for pid := range bm { + // remove the initial + f.delSpan(pid, size) + + f.allocs[pid] = txid + + remain := size - uint64(n) + + // add remain span + f.addSpan(pid+common.Pgid(n), remain) + + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + return 0 +} + +func (f *hashMap) FreeCount() int { + common.Verify(func() { + expectedFreePageCount := f.hashmapFreeCountSlow() + common.Assert(int(f.freePagesCount) == expectedFreePageCount, + "freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount) + }) + return int(f.freePagesCount) +} + +func (f *hashMap) freePageIds() common.Pgids { + count := f.FreeCount() + if count == 0 { + return common.Pgids{} + } + + m := make([]common.Pgid, 0, count) + + startPageIds := make([]common.Pgid, 0, len(f.forwardMap)) + for k := range f.forwardMap { + startPageIds = append(startPageIds, k) + } + sort.Sort(common.Pgids(startPageIds)) + + for _, start := range startPageIds { + if size, ok := f.forwardMap[start]; ok { + for i := 0; i < int(size); i++ { + m = append(m, start+common.Pgid(i)) + } + } + } + + return m +} + +func (f *hashMap) hashmapFreeCountSlow() int { + count := 0 + for _, size := range f.forwardMap { + count += int(size) + } + return count +} + +func (f *hashMap) addSpan(start common.Pgid, size uint64) { + f.backwardMap[start-1+common.Pgid(size)] = size + f.forwardMap[start] = size + if _, ok := f.freemaps[size]; !ok { + f.freemaps[size] = make(map[common.Pgid]struct{}) + } + + f.freemaps[size][start] = struct{}{} + f.freePagesCount += size +} + +func (f *hashMap) delSpan(start common.Pgid, size uint64) { + delete(f.forwardMap, start) + delete(f.backwardMap, start+common.Pgid(size-1)) + delete(f.freemaps[size], start) + if len(f.freemaps[size]) == 0 { + delete(f.freemaps, size) + } + f.freePagesCount -= size +} + +func (f *hashMap) mergeSpans(ids common.Pgids) { + common.Verify(func() { + ids1Freemap := f.idsFromFreemaps() + ids2Forward := f.idsFromForwardMap() + ids3Backward := f.idsFromBackwardMap() + + if !reflect.DeepEqual(ids1Freemap, ids2Forward) { + panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap)) + } + if !reflect.DeepEqual(ids1Freemap, ids3Backward) { + panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap)) + } + + sort.Sort(ids) + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.freemaps. 
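The hashmap variant indexes free spans three ways (by size via `freemaps`, by start via `forwardMap`, by end via `backwardMap`), so an exact-size request is satisfied in one map lookup and a larger span is split in place. A behavioral sketch, as if inside `package freelist` (hypothetical function; `NewHashMapFreelist` is defined below):

// Hypothetical example: a single span {100..104} serves a request for 3
// pages by returning 100 and re-adding the remainder {103, 104}.
func hashmapAllocateExample() common.Pgid {
	f := NewHashMapFreelist()
	f.Init(common.Pgids{100, 101, 102, 103, 104})
	return f.Allocate(common.Txid(7), 3) // == 100; pages 103-104 stay free
}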
+ if _, ok := ids1Freemap[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps)) + } + } + }) + for _, id := range ids { + // try to see if we can merge and update + f.mergeWithExistingSpan(id) + } +} + +// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward +func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) { + prev := pid - 1 + next := pid + 1 + + preSize, mergeWithPrev := f.backwardMap[prev] + nextSize, mergeWithNext := f.forwardMap[next] + newStart := pid + newSize := uint64(1) + + if mergeWithPrev { + //merge with previous span + start := prev + 1 - common.Pgid(preSize) + f.delSpan(start, preSize) + + newStart -= common.Pgid(preSize) + newSize += preSize + } + + if mergeWithNext { + // merge with next span + f.delSpan(next, nextSize) + newSize += nextSize + } + + f.addSpan(newStart, newSize) +} + +// idsFromFreemaps get all free page IDs from f.freemaps. +// used by test only. +func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for size, idSet := range f.freemaps { + for start := range idSet { + for i := 0; i < int(size); i++ { + id := start + common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps)) + } + ids[id] = struct{}{} + } + } + } + return ids +} + +// idsFromForwardMap get all free page IDs from f.forwardMap. +// used by test only. +func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for start, size := range f.forwardMap { + for i := 0; i < int(size); i++ { + id := start + common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} + +// idsFromBackwardMap get all free page IDs from f.backwardMap. +// used by test only. +func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for end, size := range f.backwardMap { + for i := 0; i < int(size); i++ { + id := end - common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} + +func NewHashMapFreelist() Interface { + hm := &hashMap{ + shared: newShared(), + freemaps: make(map[uint64]pidSet), + forwardMap: make(map[common.Pgid]uint64), + backwardMap: make(map[common.Pgid]uint64), + } + hm.Interface = hm + return hm +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/shared.go b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go new file mode 100644 index 0000000000000..f2d1130083f37 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go @@ -0,0 +1,310 @@ +package freelist + +import ( + "fmt" + "math" + "sort" + "unsafe" + + "go.etcd.io/bbolt/internal/common" +) + +type txPending struct { + ids []common.Pgid + alloctx []common.Txid // txids allocating the ids + lastReleaseBegin common.Txid // beginning txid of last matching releaseRange +} + +type shared struct { + Interface + + readonlyTXIDs []common.Txid // all readonly transaction IDs. + allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid. + cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids. + pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx. 
+} + +func newShared() *shared { + return &shared{ + pending: make(map[common.Txid]*txPending), + allocs: make(map[common.Pgid]common.Txid), + cache: make(map[common.Pgid]struct{}), + } +} + +func (t *shared) pendingPageIds() map[common.Txid]*txPending { + return t.pending +} + +func (t *shared) PendingCount() int { + var count int + for _, txp := range t.pending { + count += len(txp.ids) + } + return count +} + +func (t *shared) Count() int { + return t.FreeCount() + t.PendingCount() +} + +func (t *shared) Freed(pgId common.Pgid) bool { + _, ok := t.cache[pgId] + return ok +} + +func (t *shared) Free(txid common.Txid, p *common.Page) { + if p.Id() <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id())) + } + + // Free page and all its overflow pages. + txp := t.pending[txid] + if txp == nil { + txp = &txPending{} + t.pending[txid] = txp + } + allocTxid, ok := t.allocs[p.Id()] + common.Verify(func() { + if allocTxid == txid { + panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid)) + } + }) + if ok { + delete(t.allocs, p.Id()) + } + + for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ { + // Verify that page is not already free. + if _, ok := t.cache[id]; ok { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + t.cache[id] = struct{}{} + } +} + +func (t *shared) Rollback(txid common.Txid) { + // Remove page ids from cache. + txp := t.pending[txid] + if txp == nil { + return + } + for i, pgid := range txp.ids { + delete(t.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + t.allocs[pgid] = tx + } else { + // A writing TXN should never free a page which was allocated by itself. + panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid)) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(t.pending, txid) + + // Remove pgids which are allocated by this txid + for pgid, tid := range t.allocs { + if tid == txid { + delete(t.allocs, pgid) + } + } +} + +func (t *shared) AddReadonlyTXID(tid common.Txid) { + t.readonlyTXIDs = append(t.readonlyTXIDs, tid) +} + +func (t *shared) RemoveReadonlyTXID(tid common.Txid) { + for i := range t.readonlyTXIDs { + if t.readonlyTXIDs[i] == tid { + last := len(t.readonlyTXIDs) - 1 + t.readonlyTXIDs[i] = t.readonlyTXIDs[last] + t.readonlyTXIDs = t.readonlyTXIDs[:last] + break + } + } +} + +type txIDx []common.Txid + +func (t txIDx) Len() int { return len(t) } +func (t txIDx) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txIDx) Less(i, j int) bool { return t[i] < t[j] } + +func (t *shared) ReleasePendingPages() { + // Free all pending pages prior to the earliest open transaction. + sort.Sort(txIDx(t.readonlyTXIDs)) + minid := common.Txid(math.MaxUint64) + if len(t.readonlyTXIDs) > 0 { + minid = t.readonlyTXIDs[0] + } + if minid > 0 { + t.release(minid - 1) + } + // Release unused txid extents. + for _, tid := range t.readonlyTXIDs { + t.releaseRange(minid, tid-1) + minid = tid + 1 + } + t.releaseRange(minid, common.Txid(math.MaxUint64)) + // Any page both allocated and freed in an extent is safe to release. 
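`Free` above parks pages on a per-transaction pending list, and `ReleasePendingPages` only moves them to the free list once no read-only transaction that might still reference them remains open. A hypothetical walk-through, as if inside `package freelist`:

// Hypothetical flow: pages freed by tx 12 stay pending until no
// read-only transaction older than tx 12 is open.
func releaseFlowExample(f Interface, p *common.Page) {
	f.AddReadonlyTXID(11)   // a reader opened at tx 11 may still see p
	f.Free(12, p)           // p joins the pending list of tx 12
	f.ReleasePendingPages() // p is NOT released: reader 11 predates tx 12
	f.RemoveReadonlyTXID(11)
	f.ReleasePendingPages() // no readers left: p moves to the free list
}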
+} + +func (t *shared) release(txid common.Txid) { + m := make(common.Pgids, 0) + for tid, txp := range t.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +func (t *shared) releaseRange(begin, end common.Txid) { + if begin > end { + return + } + m := common.Pgids{} + for tid, txp := range t.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. + if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +// Copyall copies a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (t *shared) Copyall(dst []common.Pgid) { + m := make(common.Pgids, 0, t.PendingCount()) + for _, txp := range t.pendingPageIds() { + m = append(m, txp.ids...) + } + sort.Sort(m) + common.Mergepgids(dst, t.freePageIds(), m) +} + +func (t *shared) Reload(p *common.Page) { + t.Read(p) + t.NoSyncReload(t.freePageIds()) +} + +func (t *shared) NoSyncReload(pgIds common.Pgids) { + // Build a cache of only pending pages. + pcache := make(map[common.Pgid]bool) + for _, txp := range t.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + a := []common.Pgid{} + for _, id := range pgIds { + if !pcache[id] { + a = append(a, id) + } + } + + t.Init(a) +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (t *shared) reindex() { + free := t.freePageIds() + pending := t.pendingPageIds() + t.cache = make(map[common.Pgid]struct{}, len(free)) + for _, id := range free { + t.cache[id] = struct{}{} + } + for _, txp := range pending { + for _, pendingID := range txp.ids { + t.cache[pendingID] = struct{}{} + } + } +} + +func (t *shared) Read(p *common.Page) { + if !p.IsFreelistPage() { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ())) + } + + ids := p.FreelistPageIds() + + // Copy the list of page ids from the freelist. + if len(ids) == 0 { + t.Init([]common.Pgid{}) + } else { + // copy the ids, so we don't modify on the freelist page directly + idsCopy := make([]common.Pgid, len(ids)) + copy(idsCopy, ids) + // Make sure they're sorted. + sort.Sort(common.Pgids(idsCopy)) + + t.Init(idsCopy) + } +} + +func (t *shared) EstimatedWritePageSize() int { + n := t.Count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n) +} + +func (t *shared) Write(p *common.Page) { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.SetFlags(common.FreelistPageFlag) + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. 
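`EstimatedWritePageSize` above mirrors the overflow convention used by `Write`: one `Pgid` per id, plus one extra leading slot once the count reaches 0xFFFF. The arithmetic in isolation (hypothetical standalone function; assumes `Pgid` is 8 bytes, i.e. `uint64`):

// Hypothetical restatement of the size estimate for a serialized freelist.
func estimatedSize(pageHeaderSize, count int) int {
	n := count
	if n >= 0xFFFF {
		n++ // the leading element stores the real count
	}
	return pageHeaderSize + 8*n // 8 == unsafe.Sizeof(Pgid(0))
}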
+ l := t.Count() + if l == 0 { + p.SetCount(uint16(l)) + } else if l < 0xFFFF { + p.SetCount(uint16(l)) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l) + t.Copyall(ids) + } else { + p.SetCount(0xFFFF) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l+1) + ids[0] = common.Pgid(l) + t.Copyall(ids[1:]) + } +} diff --git a/vendor/go.etcd.io/bbolt/logger.go b/vendor/go.etcd.io/bbolt/logger.go new file mode 100644 index 0000000000000..fb250894a2956 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/logger.go @@ -0,0 +1,113 @@ +package bbolt + +// See https://github.com/etcd-io/raft/blob/main/logger.go +import ( + "fmt" + "io" + "log" + "os" +) + +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + + Info(v ...interface{}) + Infof(format string, v ...interface{}) + + Warning(v ...interface{}) + Warningf(format string, v ...interface{}) + + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) +} + +func getDiscardLogger() Logger { + return discardLogger +} + +var ( + discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)} +) + +const ( + calldepth = 2 +) + +// DefaultLogger is a default implementation of the Logger interface. +type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v...) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) 
+} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go index 9c56150d88315..022b1001e279c 100644 --- a/vendor/go.etcd.io/bbolt/node.go +++ b/vendor/go.etcd.io/bbolt/node.go @@ -4,7 +4,8 @@ import ( "bytes" "fmt" "sort" - "unsafe" + + "go.etcd.io/bbolt/internal/common" ) // node represents an in-memory, deserialized page. @@ -14,10 +15,10 @@ type node struct { unbalanced bool spilled bool key []byte - pgid pgid + pgid common.Pgid parent *node children nodes - inodes inodes + inodes common.Inodes } // root returns the top-level node this node is attached to. @@ -38,10 +39,10 @@ func (n *node) minKeys() int { // size returns the size of the node after serialization. func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) } return int(sz) } @@ -50,10 +51,10 @@ func (n *node) size() int { // This is an optimization to avoid calculating a large node when we only need // to know if it fits inside a certain page size. func (n *node) sizeLessThan(v uintptr) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) if sz >= v { return false } @@ -64,9 +65,9 @@ func (n *node) sizeLessThan(v uintptr) bool { // pageElementSize returns the size of each page element based on the type of node. func (n *node) pageElementSize() uintptr { if n.isLeaf { - return leafPageElementSize + return common.LeafPageElementSize } - return branchPageElementSize + return common.BranchPageElementSize } // childAt returns the child node at a given index. @@ -74,12 +75,12 @@ func (n *node) childAt(index int) *node { if n.isLeaf { panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) } - return n.bucket.node(n.inodes[index].pgid, n) + return n.bucket.node(n.inodes[index].Pgid(), n) } // childIndex returns the index of a given child node. func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), child.key) != -1 }) return index } @@ -113,9 +114,9 @@ func (n *node) prevSibling() *node { } // put inserts a key/value. -func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { - if pgId >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid)) +func (n *node) put(oldKey, newKey, value []byte, pgId common.Pgid, flags uint32) { + if pgId >= n.bucket.tx.meta.Pgid() { + panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.Pgid())) } else if len(oldKey) <= 0 { panic("put: zero-length old key") } else if len(newKey) <= 0 { @@ -123,30 +124,30 @@ func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { } // Find insertion index. 
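The `DefaultLogger` added above wraps the standard library `log.Logger` and discards everything by default; debug output stays off until `EnableDebug` is called. A construction sketch (hypothetical helper, not part of the diff):

import (
	"log"
	"os"
)

// Hypothetical constructor: a DefaultLogger writing timestamped
// debug output to stderr.
func newDebugLogger() *DefaultLogger {
	l := &DefaultLogger{Logger: log.New(os.Stderr, "bbolt ", 0)}
	l.EnableTimestamps() // adds log.Ldate | log.Ltime to the flags
	l.EnableDebug()      // Debug/Debugf start emitting output
	return l
}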
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), oldKey) != -1 }) // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].Key(), oldKey) if !exact { - n.inodes = append(n.inodes, inode{}) + n.inodes = append(n.inodes, common.Inode{}) copy(n.inodes[index+1:], n.inodes[index:]) } inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgId - _assert(len(inode.key) > 0, "put: zero-length inode key") + inode.SetFlags(flags) + inode.SetKey(newKey) + inode.SetValue(value) + inode.SetPgid(pgId) + common.Assert(len(inode.Key()) > 0, "put: zero-length inode key") } // del removes a key from the node. func (n *node) del(key []byte) { // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), key) != -1 }) // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].Key(), key) { return } @@ -158,30 +159,15 @@ func (n *node) del(key []byte) { } // read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } +func (n *node) read(p *common.Page) { + n.pgid = p.Id() + n.isLeaf = p.IsLeafPage() + n.inodes = common.ReadInodeFromPage(p) - // Save first key so we can find the node in the parent when we spill. + // Save first key, so we can find the node in the parent when we spill. if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") + n.key = n.inodes[0].Key() + common.Assert(len(n.key) > 0, "read: zero-length node key") } else { n.key = nil } @@ -190,57 +176,27 @@ func (n *node) read(p *page) { // write writes the items onto one or more pages. // The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set // and the rest should be zeroed. -func (n *node) write(p *page) { - _assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page") +func (n *node) write(p *common.Page) { + common.Assert(p.Count() == 0 && p.Flags() == 0, "node cannot be written into a not empty page") // Initialize page. if n.isLeaf { - p.flags = leafPageFlag + p.SetFlags(common.LeafPageFlag) } else { - p.flags = branchPageFlag + p.SetFlags(common.BranchPageFlag) } if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.Id())) } - p.count = uint16(len(n.inodes)) + p.SetCount(uint16(len(n.inodes))) // Stop here if there are no items to write. 
- if p.count == 0 { + if p.Count() == 0 { return } - // Loop over each item and write it to the page. - // off tracks the offset into p of the start of the next data. - off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Create a slice to write into of needed size and advance - // byte pointer for next iteration. - sz := len(item.key) + len(item.value) - b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) - off += uintptr(sz) - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // Write data for the element to the end of the page. - l := copy(b, item.key) - copy(b[l:], item.value) - } + common.WriteInodeToPage(n.inodes, p) // DEBUG ONLY: n.dump() } @@ -273,7 +229,7 @@ func (n *node) split(pageSize uintptr) []*node { func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // Ignore the split if the page doesn't have at least enough nodes for // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + if len(n.inodes) <= (common.MinKeysPerPage*2) || n.sizeLessThan(pageSize) { return n, nil } @@ -313,17 +269,17 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // It returns the index as well as the size of the first page. // This is only be called from split(). func (n *node) splitIndex(threshold int) (index, sz uintptr) { - sz = pageHeaderSize + sz = common.PageHeaderSize // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + for i := 0; i < len(n.inodes)-common.MinKeysPerPage; i++ { index = uintptr(i) inode := n.inodes[i] - elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) + elsize := n.pageElementSize() + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) // If we have at least the minimum number of keys and adding another // node would put us over the threshold then exit and return. - if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { + if index >= common.MinKeysPerPage && sz+elsize > uintptr(threshold) { break } @@ -360,7 +316,7 @@ func (n *node) spill() error { for _, node := range nodes { // Add node's page to the freelist if it's not new. if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + tx.db.freelist.Free(tx.meta.Txid(), tx.page(node.pgid)) node.pgid = 0 } @@ -371,10 +327,10 @@ func (n *node) spill() error { } // Write the node. 
- if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + if p.Id() >= tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.Id(), tx.meta.Pgid())) } - node.pgid = p.id + node.pgid = p.Id() node.write(p) node.spilled = true @@ -382,12 +338,12 @@ func (n *node) spill() error { if node.parent != nil { var key = node.key if key == nil { - key = node.inodes[0].key + key = node.inodes[0].Key() } - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") + node.parent.put(key, node.inodes[0].Key(), nil, node.pgid, 0) + node.key = node.inodes[0].Key() + common.Assert(len(node.key) > 0, "spill: zero-length node key") } // Update the statistics. @@ -415,8 +371,8 @@ func (n *node) rebalance() { // Update statistics. n.bucket.tx.stats.IncRebalance(1) - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 + // Ignore if node is above threshold (25% when FillPercent is set to DefaultFillPercent) and has enough keys. + var threshold = int(float64(n.bucket.tx.db.pageSize)*n.bucket.FillPercent) / 2 if n.size() > threshold && len(n.inodes) > n.minKeys() { return } @@ -426,14 +382,14 @@ func (n *node) rebalance() { // If root node is a branch and only has one node then collapse it. if !n.isLeaf && len(n.inodes) == 1 { // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) + child := n.bucket.node(n.inodes[0].Pgid(), n) n.isLeaf = child.isLeaf n.inodes = child.inodes[:] n.children = child.children // Reparent all child nodes being moved. for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { child.parent = n } } @@ -457,53 +413,37 @@ func (n *node) rebalance() { return } - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + common.Assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) + // Merge with right sibling if idx == 0, otherwise left sibling. + var leftNode, rightNode *node + var useNextSibling = n.parent.childIndex(n) == 0 if useNextSibling { - target = n.nextSibling() + leftNode = n + rightNode = n.nextSibling() } else { - target = n.prevSibling() + leftNode = n.prevSibling() + rightNode = n } - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } + // If both nodes are too small then merge them. + // Reparent all child nodes being moved. + for _, inode := range rightNode.inodes { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { + child.parent.removeChild(child) + child.parent = leftNode + child.parent.children = append(child.parent.children, child) } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. 
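The rebalance refactor in this hunk replaces two mirrored merge branches with one: the node and its sibling are first normalized into `leftNode`/`rightNode`, and the merge always folds right into left. A toy sketch of just that normalization, assuming a parent with an ordered children slice and at least two children (as the assertion above guarantees):

```go
package main

import "fmt"

type node struct {
	name     string
	parent   *node
	children []*node
}

func (n *node) childIndex(c *node) int {
	for i, ch := range n.children {
		if ch == c {
			return i
		}
	}
	return -1
}

// siblingsForMerge mirrors the logic above: the first child merges with
// its right sibling, every other child with its left sibling, and the
// result is always ordered (left, right).
func siblingsForMerge(n *node) (left, right *node) {
	idx := n.parent.childIndex(n)
	if idx == 0 {
		return n, n.parent.children[idx+1]
	}
	return n.parent.children[idx-1], n
}

func main() {
	p := &node{name: "parent"}
	a := &node{name: "a", parent: p}
	b := &node{name: "b", parent: p}
	p.children = []*node{a, b}

	l, r := siblingsForMerge(a)
	fmt.Println(l.name, r.name) // a b
	l, r = siblingsForMerge(b)
	fmt.Println(l.name, r.name) // a b
}
```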
- for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() } - // Either this node or the target node was deleted from the parent so rebalance it. + // Copy over inodes from right node to left node and remove right node. + leftNode.inodes = append(leftNode.inodes, rightNode.inodes...) + n.parent.del(rightNode.key) + n.parent.removeChild(rightNode) + delete(n.bucket.nodes, rightNode.pgid) + rightNode.free() + + // Either this node or the sibling node was deleted from the parent so rebalance it. n.parent.rebalance() } @@ -525,20 +465,20 @@ func (n *node) dereference() { key := make([]byte, len(n.key)) copy(key, n.key) n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + common.Assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") } for i := range n.inodes { inode := &n.inodes[i] - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") + key := make([]byte, len(inode.Key())) + copy(key, inode.Key()) + inode.SetKey(key) + common.Assert(len(inode.Key()) > 0, "dereference: zero-length inode key") - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value + value := make([]byte, len(inode.Value())) + copy(value, inode.Value()) + inode.SetValue(value) } // Recursively dereference children. @@ -553,7 +493,7 @@ func (n *node) dereference() { // free adds the node's underlying page to the freelist. func (n *node) free() { if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.bucket.tx.db.freelist.Free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid)) n.pgid = 0 } } @@ -594,17 +534,5 @@ type nodes []*node func (s nodes) Len() int { return len(s) } func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s nodes) Less(i, j int) bool { - return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 + return bytes.Compare(s[i].inodes[0].Key(), s[j].inodes[0].Key()) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. -type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go deleted file mode 100644 index bb081b031e659..0000000000000 --- a/vendor/go.etcd.io/bbolt/page.go +++ /dev/null @@ -1,212 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = unsafe.Sizeof(page{}) - -const minKeysPerPage = 2 - -const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) -const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 -} - -// typ returns a human readable page type string used for debugging. 
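`dereference` above exists because node keys and values may alias the mmap'ed file; before the mapping is remapped they must be copied onto the Go heap. A tiny sketch of the copy-out idiom it applies to every key and value:

```go
package main

import "fmt"

// heapCopy detaches a byte slice from whatever backing array it aliases
// (in bbolt's case, the mmap'ed region) by copying it onto the heap.
func heapCopy(b []byte) []byte {
	out := make([]byte, len(b))
	copy(out, b)
	return out
}

func main() {
	backing := []byte("key-in-mmap")
	k := heapCopy(backing)
	backing[0] = 'X' // mutating the original no longer affects the copy
	fmt.Println(string(k)) // key-in-mmap
}
```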
-func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) -} - -func (p *page) fastCheck(id pgid) { - _assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) - // Only one flag of page-type can be set. - _assert(p.flags == branchPageFlag || - p.flags == leafPageFlag || - p.flags == metaPageFlag || - p.flags == freelistPageFlag, - "page %v: has unexpected type/flags: %x", p.id, p.flags) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - leafPageElementSize, int(index))) -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) - return elems -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - unsafe.Sizeof(branchPageElement{}), int(index))) -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) - return elems -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - i := int(n.pos) - j := i + int(n.ksize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - i := int(n.pos) + int(n.ksize) - j := i + int(n.vsize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// PageInfo represents human readable information about a page. 
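The deleted accessors above overlay typed structs on raw page memory: the element array starts immediately after the page header, and `unsafe.Slice` turns that address plus `p.count` into an ordinary Go slice. A minimal sketch of the same overlay technique, with toy header/element types (not bbolt's) and an ordinary heap buffer standing in for the mmap:

```go
package main

import (
	"fmt"
	"unsafe"
)

// A header followed in memory by count elements, mimicking bbolt's
// page layout (header, then leaf/branch elements, then key/value data).
type header struct {
	count uint16
	_     [6]byte // padding so elements start at an 8-byte boundary
}

type element struct {
	pos   uint32
	ksize uint32
}

func main() {
	// One contiguous buffer with a header laid over its start, the way
	// bbolt lays a page struct over a slice of the mmap'ed region.
	buf := make([]byte, unsafe.Sizeof(header{})+2*unsafe.Sizeof(element{}))
	h := (*header)(unsafe.Pointer(&buf[0]))
	h.count = 2

	// Elements begin right after the header; unsafe.Slice gives a typed
	// view of that region, like leafPageElements above.
	data := unsafe.Add(unsafe.Pointer(h), unsafe.Sizeof(*h))
	elems := unsafe.Slice((*element)(data), int(h.count))
	elems[0] = element{pos: 16, ksize: 3}
	elems[1] = element{pos: 19, ksize: 5}
	fmt.Println(elems) // [{16 3} {19 5}]
}
```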
-type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 766395de3be7d..7b5db77278629 100644 --- a/vendor/go.etcd.io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -5,15 +5,16 @@ import ( "fmt" "io" "os" + "runtime" "sort" "strings" "sync/atomic" "time" "unsafe" -) -// txid represents the internal transaction identifier. -type txid uint64 + berrors "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" +) // Tx represents a read-only or read/write transaction on the database. // Read-only transactions can be used for retrieving values for keys and creating cursors. @@ -27,9 +28,9 @@ type Tx struct { writable bool managed bool db *DB - meta *meta + meta *common.Meta root Bucket - pages map[pgid]*page + pages map[common.Pgid]*common.Page stats TxStats commitHandlers []func() @@ -48,24 +49,27 @@ func (tx *Tx) init(db *DB) { tx.pages = nil // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) + tx.meta = &common.Meta{} + db.meta().Copy(tx.meta) // Copy over the root bucket. tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root + tx.root.InBucket = &common.InBucket{} + *tx.root.InBucket = *(tx.meta.RootBucket()) // Increment the transaction id and add a page cache for writable transactions. if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) + tx.pages = make(map[common.Pgid]*common.Page) + tx.meta.IncTxid() } } // ID returns the transaction id. func (tx *Tx) ID() int { - return int(tx.meta.txid) + if tx == nil || tx.meta == nil { + return -1 + } + return int(tx.meta.Txid()) } // DB returns a reference to the database that created the transaction. @@ -75,7 +79,7 @@ func (tx *Tx) DB() *DB { // Size returns current database size in bytes as seen by this transaction. 
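The deleted `mergepgids` is a two-way merge of sorted id slices, with a `sort.Search` fast path that copies whole runs at once. A simpler element-at-a-time version showing the same sorted-union contract:

```go
package main

import "fmt"

// mergeSorted returns the sorted union of a and b -- the same contract
// as mergepgids above, minus the prefix-run fast path.
func mergeSorted(a, b []uint64) []uint64 {
	out := make([]uint64, 0, len(a)+len(b))
	for len(a) > 0 && len(b) > 0 {
		if a[0] <= b[0] {
			out = append(out, a[0])
			a = a[1:]
		} else {
			out = append(out, b[0])
			b = b[1:]
		}
	}
	out = append(out, a...)
	return append(out, b...)
}

func main() {
	fmt.Println(mergeSorted([]uint64{1, 4, 9}, []uint64{2, 4, 7, 12}))
	// [1 2 4 4 7 9 12]
}
```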
func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) + return int64(tx.meta.Pgid()) * int64(tx.db.pageSize) } // Writable returns whether the transaction can perform write operations. @@ -96,6 +100,11 @@ func (tx *Tx) Stats() TxStats { return tx.stats } +// Inspect returns the structure of the database. +func (tx *Tx) Inspect() BucketStructure { + return tx.root.Inspect() +} + // Bucket retrieves a bucket by name. // Returns nil if the bucket does not exist. // The bucket instance is only valid for the lifetime of the transaction. @@ -123,6 +132,24 @@ func (tx *Tx) DeleteBucket(name []byte) error { return tx.root.DeleteBucket(name) } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. the key represents a non-bucket value. +// +// If src is nil, it means moving a top level bucket into the target bucket. +// If dst is nil, it means converting the child bucket into a top level bucket. +func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error { + if src == nil { + src = &tx.root + } + if dst == nil { + dst = &tx.root + } + return src.MoveBucket(child, dst) +} + // ForEach executes a function for each bucket in the root. // If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. @@ -137,15 +164,28 @@ func (tx *Tx) OnCommit(fn func()) { tx.commitHandlers = append(tx.commitHandlers, fn) } -// Commit writes all changes to disk and updates the meta page. +// Commit writes all changes to disk, updates the meta page and closes the transaction. // Returns an error if a disk write error occurs, or if Commit is // called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") +func (tx *Tx) Commit() (err error) { + txId := tx.ID() + lg := tx.db.Logger() + if lg != discardLogger { + lg.Debugf("Committing transaction %d", txId) + defer func() { + if err != nil { + lg.Errorf("Committing transaction failed: %v", err) + } else { + lg.Debugf("Committing transaction %d successfully", txId) + } + }() + } + + common.Assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { - return ErrTxClosed + return berrors.ErrTxClosed } else if !tx.writable { - return ErrTxNotWritable + return berrors.ErrTxNotWritable } // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. @@ -157,40 +197,43 @@ func (tx *Tx) Commit() error { tx.stats.IncRebalanceTime(time.Since(startTime)) } - opgid := tx.meta.pgid + opgid := tx.meta.Pgid() // spill data onto dirty pages. startTime = time.Now() - if err := tx.root.spill(); err != nil { + if err = tx.root.spill(); err != nil { + lg.Errorf("spilling data onto dirty pages failed: %v", err) tx.rollback() return err } tx.stats.IncSpillTime(time.Since(startTime)) // Free the old root bucket. - tx.meta.root.root = tx.root.root + tx.meta.RootBucket().SetRootPage(tx.root.RootPage()) // Free the old freelist because commit writes out a fresh freelist. 
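The new `Tx.MoveBucket` API in this hunk reparents a sub-bucket inside one write transaction; passing nil for `src` or `dst` means the root bucket. A usage sketch against the signature shown above (the file name and bucket names are illustrative):

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		src, err := tx.CreateBucketIfNotExists([]byte("old"))
		if err != nil {
			return err
		}
		if _, err := src.CreateBucketIfNotExists([]byte("child")); err != nil {
			return err
		}
		dst, err := tx.CreateBucketIfNotExists([]byte("new"))
		if err != nil {
			return err
		}
		// Move "old"/"child" so it becomes "new"/"child".
		return tx.MoveBucket([]byte("child"), src, dst)
	})
	if err != nil {
		log.Fatal(err)
	}
}
```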
- if tx.meta.freelist != pgidNoFreelist { - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + if tx.meta.Freelist() != common.PgidNoFreelist { + tx.db.freelist.Free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist())) } if !tx.db.NoFreelistSync { - err := tx.commitFreelist() + err = tx.commitFreelist() if err != nil { + lg.Errorf("committing freelist failed: %v", err) return err } } else { - tx.meta.freelist = pgidNoFreelist + tx.meta.SetFreelist(common.PgidNoFreelist) } // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { + if tx.meta.Pgid() > opgid { _ = errors.New("") // gofail: var lackOfDiskSpace string // tx.rollback() // return errors.New(lackOfDiskSpace) - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + if err = tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil { + lg.Errorf("growing db size failed, pgid: %d, pagesize: %d, error: %v", tx.meta.Pgid(), tx.db.pageSize, err) tx.rollback() return err } @@ -198,7 +241,8 @@ func (tx *Tx) Commit() error { // Write dirty pages to disk. startTime = time.Now() - if err := tx.write(); err != nil { + if err = tx.write(); err != nil { + lg.Errorf("writing data failed: %v", err) tx.rollback() return err } @@ -208,11 +252,11 @@ func (tx *Tx) Commit() error { ch := tx.Check() var errs []string for { - err, ok := <-ch + chkErr, ok := <-ch if !ok { break } - errs = append(errs, err.Error()) + errs = append(errs, chkErr.Error()) } if len(errs) > 0 { panic("check fail: " + strings.Join(errs, "\n")) @@ -220,7 +264,8 @@ func (tx *Tx) Commit() error { } // Write meta to disk. - if err := tx.writeMeta(); err != nil { + if err = tx.writeMeta(); err != nil { + lg.Errorf("writeMeta failed: %v", err) tx.rollback() return err } @@ -240,16 +285,14 @@ func (tx *Tx) Commit() error { func (tx *Tx) commitFreelist() error { // Allocate new pages for the new free list. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + p, err := tx.allocate((tx.db.freelist.EstimatedWritePageSize() / tx.db.pageSize) + 1) if err != nil { tx.rollback() return err } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id + + tx.db.freelist.Write(p) + tx.meta.SetFreelist(p.Id()) return nil } @@ -257,9 +300,9 @@ func (tx *Tx) commitFreelist() error { // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") + common.Assert(!tx.managed, "managed tx rollback not allowed") if tx.db == nil { - return ErrTxClosed + return berrors.ErrTxClosed } tx.nonPhysicalRollback() return nil @@ -271,7 +314,7 @@ func (tx *Tx) nonPhysicalRollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.Rollback(tx.meta.Txid()) } tx.close() } @@ -282,17 +325,17 @@ func (tx *Tx) rollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.Rollback(tx.meta.Txid()) // When mmap fails, the `data`, `dataref` and `datasz` may be reset to // zero values, and there is no way to reload free page IDs in this case. if tx.db.data != nil { if !tx.db.hasSyncedFreelist() { // Reconstruct free page list by scanning the DB to get the whole free page list. 
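In strict mode, `Commit` above drains the error channel returned by `tx.Check` and panics if anything surfaced. Since the producer closes the channel when it finishes, the consumer side reduces to a range loop; a sketch:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// drain collects every error from a channel the producer closes when
// done -- the pattern Commit uses with tx.Check().
func drain(ch <-chan error) []string {
	var errs []string
	for err := range ch {
		errs = append(errs, err.Error())
	}
	return errs
}

func main() {
	ch := make(chan error, 2)
	ch <- errors.New("page 7: already freed")
	ch <- errors.New("page 9: unreachable unfreed")
	close(ch)
	if errs := drain(ch); len(errs) > 0 {
		fmt.Println("check fail: " + strings.Join(errs, "\n"))
	}
}
```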
- // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. - tx.db.freelist.noSyncReload(tx.db.freepages()) + // Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode. + tx.db.freelist.NoSyncReload(tx.db.freepages()) } else { // Read free page list from freelist page. - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + tx.db.freelist.Reload(tx.db.page(tx.db.meta().Freelist())) } } } @@ -305,9 +348,9 @@ func (tx *Tx) close() { } if tx.writable { // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() + var freelistFreeN = tx.db.freelist.FreeCount() + var freelistPendingN = tx.db.freelist.PendingCount() + var freelistAlloc = tx.db.freelist.EstimatedWritePageSize() // Remove transaction ref & writer lock. tx.db.rwtx = nil @@ -335,7 +378,7 @@ func (tx *Tx) close() { // Copy writes the entire database to a writer. // This function exists for backwards compatibility. // -// Deprecated; Use WriteTo() instead. +// Deprecated: Use WriteTo() instead. func (tx *Tx) Copy(w io.Writer) error { _, err := tx.WriteTo(w) return err @@ -357,13 +400,13 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { // Generate a meta page. We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta + page := (*common.Page)(unsafe.Pointer(&buf[0])) + page.SetFlags(common.MetaPageFlag) + *page.Meta() = *tx.meta // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() + page.SetId(0) + page.Meta().SetChecksum(page.Meta().Sum64()) nn, err := w.Write(buf) n += int64(nn) if err != nil { @@ -371,9 +414,9 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() + page.SetId(1) + page.Meta().DecTxid() + page.Meta().SetChecksum(page.Meta().Sum64()) nn, err = w.Write(buf) n += int64(nn) if err != nil { @@ -413,14 +456,16 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error { } // allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(tx.meta.txid, count) +func (tx *Tx) allocate(count int) (*common.Page, error) { + lg := tx.db.Logger() + p, err := tx.db.allocate(tx.meta.Txid(), count) if err != nil { + lg.Errorf("allocating failed, txid: %d, count: %d, error: %v", tx.meta.Txid(), count, err) return nil, err } // Save to our page cache. - tx.pages[p.id] = p + tx.pages[p.Id()] = p // Update statistics. tx.stats.IncPageCount(int64(count)) @@ -432,18 +477,19 @@ func (tx *Tx) allocate(count int) (*page, error) { // write writes any dirty pages to disk. func (tx *Tx) write() error { // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) + lg := tx.db.Logger() + pages := make(common.Pages, 0, len(tx.pages)) for _, p := range tx.pages { pages = append(pages, p) } // Clear out page cache early. - tx.pages = make(map[pgid]*page) + tx.pages = make(map[common.Pgid]*common.Page) sort.Sort(pages) // Write pages to disk in order. 
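`WriteTo` above emits both meta pages from a single snapshot, giving meta 1 a transaction id one lower than meta 0, so the copy looks like a normally committed database: on open, bolt validates both metas and prefers the one with the higher txid. A sketch of that selection rule (the `valid` flag stands in for the checksum/magic validation):

```go
package main

import "fmt"

type meta struct {
	txid  uint64
	valid bool // stands in for checksum/magic validation
}

// pick chooses the newest valid meta page -- the rule WriteTo relies
// on when it writes meta 1 with txid-1.
func pick(m0, m1 meta) (meta, error) {
	switch {
	case m0.valid && m1.valid:
		if m0.txid >= m1.txid {
			return m0, nil
		}
		return m1, nil
	case m0.valid:
		return m0, nil
	case m1.valid:
		return m1, nil
	}
	return meta{}, fmt.Errorf("both meta pages invalid")
}

func main() {
	m, _ := pick(meta{txid: 42, valid: true}, meta{txid: 41, valid: true})
	fmt.Println(m.txid) // 42
}
```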
for _, p := range pages { - rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) - offset := int64(p.id) * int64(tx.db.pageSize) + rem := (uint64(p.Overflow()) + 1) * uint64(tx.db.pageSize) + offset := int64(p.Id()) * int64(tx.db.pageSize) var written uintptr // Write out page in "max allocation" sized chunks. @@ -452,9 +498,10 @@ func (tx *Tx) write() error { if sz > maxAllocSize-1 { sz = maxAllocSize - 1 } - buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + lg.Errorf("writeAt failed, offset: %d: %w", offset, err) return err } @@ -474,9 +521,10 @@ func (tx *Tx) write() error { } // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncDataPages struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -485,11 +533,11 @@ func (tx *Tx) write() error { for _, p := range pages { // Ignore page sizes over 1 page. // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { + if int(p.Overflow()) != 0 { continue } - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 for i := range buf { @@ -503,18 +551,24 @@ func (tx *Tx) write() error { // writeMeta writes the meta to the disk. func (tx *Tx) writeMeta() error { + // gofail: var beforeWriteMetaError string + // return errors.New(beforeWriteMetaError) + // Create a temporary buffer for the meta page. + lg := tx.db.Logger() buf := make([]byte, tx.db.pageSize) p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) + tx.meta.Write(p) // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil { + lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err) return err } - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -527,69 +581,69 @@ func (tx *Tx) writeMeta() error { // page returns a reference to the page with a given id. // If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { +func (tx *Tx) page(id common.Pgid) *common.Page { // Check the dirty pages first. if tx.pages != nil { if p, ok := tx.pages[id]; ok { - p.fastCheck(id) + p.FastCheck(id) return p } } // Otherwise return directly from the mmap. p := tx.db.page(id) - p.fastCheck(id) + p.FastCheck(id) return p } // forEachPage iterates over every page within a given page and executes a function. 
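The write loop above never hands the OS one enormous buffer: each dirty page run is written in bounded chunks, advancing both the file offset and a `written` cursor. A self-contained sketch of the chunked `WriteAt` loop, with a small assumed cap for illustration (bbolt uses `maxAllocSize - 1`):

```go
package main

import (
	"fmt"
	"os"
)

const maxChunk = 4096 // assumed cap for the sketch

// writeChunked writes buf at offset in pieces no larger than maxChunk,
// the same shape as the page-write loop in tx.write.
func writeChunked(f *os.File, buf []byte, offset int64) error {
	for written := 0; written < len(buf); {
		sz := len(buf) - written
		if sz > maxChunk {
			sz = maxChunk
		}
		if _, err := f.WriteAt(buf[written:written+sz], offset); err != nil {
			return err
		}
		offset += int64(sz)
		written += sz
	}
	return nil
}

func main() {
	f, err := os.CreateTemp("", "chunked-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := writeChunked(f, make([]byte, 10_000), 0); err != nil {
		panic(err)
	}
	fmt.Println("wrote 10000 bytes in", (10_000+maxChunk-1)/maxChunk, "chunks")
}
```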
-func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) { - stack := make([]pgid, 10) +func (tx *Tx) forEachPage(pgidnum common.Pgid, fn func(*common.Page, int, []common.Pgid)) { + stack := make([]common.Pgid, 10) stack[0] = pgidnum tx.forEachPageInternal(stack[:1], fn) } -func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) { +func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, int, []common.Pgid)) { p := tx.page(pgidstack[len(pgidstack)-1]) // Execute function. fn(p, len(pgidstack)-1, pgidstack) // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPageInternal(append(pgidstack, elem.pgid), fn) + if p.IsBranchPage() { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + tx.forEachPageInternal(append(pgidstack, elem.Pgid()), fn) } } } // Page returns page information for a given page number. // This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { +func (tx *Tx) Page(id int) (*common.PageInfo, error) { if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { + return nil, berrors.ErrTxClosed + } else if common.Pgid(id) >= tx.meta.Pgid() { return nil, nil } if tx.db.freelist == nil { - return nil, ErrFreePagesNotLoaded + return nil, berrors.ErrFreePagesNotLoaded } // Build the page info. - p := tx.db.page(pgid(id)) - info := &PageInfo{ + p := tx.db.page(common.Pgid(id)) + info := &common.PageInfo{ ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), + Count: int(p.Count()), + OverflowCount: int(p.Overflow()), } // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { + if tx.db.freelist.Freed(common.Pgid(id)) { info.Type = "free" } else { - info.Type = p.typ() + info.Type = p.Typ() } return info, nil diff --git a/vendor/go.etcd.io/bbolt/tx_check.go b/vendor/go.etcd.io/bbolt/tx_check.go index 75c7c08436dc2..c3ecbb975073a 100644 --- a/vendor/go.etcd.io/bbolt/tx_check.go +++ b/vendor/go.etcd.io/bbolt/tx_check.go @@ -3,6 +3,8 @@ package bbolt import ( "encoding/hex" "fmt" + + "go.etcd.io/bbolt/internal/common" ) // Check performs several consistency checks on the database for this transaction. @@ -13,13 +15,10 @@ import ( // because of caching. This overhead can be removed if running on a read-only // transaction, however, it is not safe to execute other writer transactions at // the same time. -func (tx *Tx) Check() <-chan error { - return tx.CheckWithOptions() -} - -// CheckWithOptions allows users to provide a customized `KVStringer` implementation, +// +// It also allows users to provide a customized `KVStringer` implementation, // so that bolt can generate human-readable diagnostic messages. -func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { +func (tx *Tx) Check(options ...CheckOption) <-chan error { chkConfig := checkConfig{ kvStringer: HexKVStringer(), } @@ -28,18 +27,22 @@ func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { } ch := make(chan error) - go tx.check(chkConfig.kvStringer, ch) + go func() { + // Close the channel to signal completion. + defer close(ch) + tx.check(chkConfig, ch) + }() return ch } -func (tx *Tx) check(kvStringer KVStringer, ch chan error) { +func (tx *Tx) check(cfg checkConfig, ch chan error) { // Force loading free list if opened in ReadOnly mode. 
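The API change above folds `CheckWithOptions` into `Check` itself via variadic functional options: each `CheckOption` mutates a `checkConfig` before the check runs. The pattern in isolation, with illustrative option names rather than bbolt's:

```go
package main

import "fmt"

type config struct {
	stringer func([]byte) string
	pageID   uint64
}

type Option func(*config)

// WithStringer and WithStartPage mirror the shape of bbolt's
// WithKVStringer / WithPageId options (names here are made up).
func WithStringer(f func([]byte) string) Option {
	return func(c *config) { c.stringer = f }
}

func WithStartPage(id uint64) Option {
	return func(c *config) { c.pageID = id }
}

func check(opts ...Option) {
	// Start from defaults, then let each option override one field.
	cfg := config{stringer: func(b []byte) string { return fmt.Sprintf("%x", b) }}
	for _, o := range opts {
		o(&cfg)
	}
	fmt.Printf("checking from page %d, key %q\n", cfg.pageID, cfg.stringer([]byte("k")))
}

func main() {
	check()                  // defaults
	check(WithStartPage(42)) // override just the start page
}
```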
tx.db.loadFreelist() // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) + freed := make(map[common.Pgid]bool) + all := make([]common.Pgid, tx.db.freelist.Count()) + tx.db.freelist.Copyall(all) for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) @@ -48,118 +51,171 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { } // Track every reachable page. - reachable := make(map[pgid]*page) + reachable := make(map[common.Pgid]*common.Page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.Freelist() != common.PgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.Freelist()).Overflow(); i++ { + reachable[tx.meta.Freelist()+common.Pgid(i)] = tx.page(tx.meta.Freelist()) } } - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch) + if cfg.pageId == 0 { + // Check the whole db file, starting from the root bucket and + // recursively check all child buckets. + tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch) - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + // Ensure all pages below high water mark are either reachable or freed. + for i := common.Pgid(0); i < tx.meta.Pgid(); i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } } + } else { + // Check the db file starting from a specified pageId. + if cfg.pageId < 2 || cfg.pageId >= uint64(tx.meta.Pgid()) { + ch <- fmt.Errorf("page ID (%d) out of range [%d, %d)", cfg.pageId, 2, tx.meta.Pgid()) + return + } + + tx.recursivelyCheckPage(common.Pgid(cfg.pageId), reachable, freed, cfg.kvStringer, ch) } +} - // Close the channel to signal completion. - close(ch) +func (tx *Tx) recursivelyCheckPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.checkInvariantProperties(pageId, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucketInPage(pageId, reachable, freed, kvStringer, ch) } -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, +func (tx *Tx) recursivelyCheckBucketInPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, kvStringer KVStringer, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } + p := tx.page(pageId) - // Check every page used by this bucket. - b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack) + switch { + case p.IsBranchPage(): + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + tx.recursivelyCheckBucketInPage(elem.Pgid(), reachable, freed, kvStringer, ch) } - - // Ensure each page is only referenced once. 
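The first pass of `check` above copies every freelist id into a slice and flags duplicates through a set: any id seen twice was freed twice. The pattern reduced to its core:

```go
package main

import "fmt"

// findDoubleFrees reports every id that appears more than once in the
// freelist dump, as the first loop of tx.check does.
func findDoubleFrees(all []uint64) []uint64 {
	freed := make(map[uint64]bool, len(all))
	var dups []uint64
	for _, id := range all {
		if freed[id] {
			dups = append(dups, id)
		}
		freed[id] = true
	}
	return dups
}

func main() {
	fmt.Println(findDoubleFrees([]uint64{3, 5, 7, 5, 9, 3})) // [5 3]
}
```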
- for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + case p.IsLeafPage(): + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + if elem.IsBucketEntry() { + inBkt := common.NewInBucket(pageId, 0) + tmpBucket := Bucket{ + InBucket: &inBkt, + rootNode: &node{isLeaf: p.IsLeafPage()}, + FillPercent: DefaultFillPercent, + tx: tx, + } + if child := tmpBucket.Bucket(elem.Key()); child != nil { + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) + } } - reachable[id] = p } + default: + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pageId) + } +} - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack) - } - }) +func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + // Ignore inline buckets. + if b.RootPage() == 0 { + return + } - tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch) + tx.checkInvariantProperties(b.RootPage(), reachable, freed, kvStringer, ch) // Check each bucket within this bucket. _ = b.ForEachBucket(func(k []byte) error { if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) } return nil }) } -// recursivelyCheckPages confirms database consistency with respect to b-tree +func (tx *Tx) checkInvariantProperties(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.forEachPage(pageId, func(p *common.Page, _ int, stack []common.Pgid) { + verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch) + }) + + tx.recursivelyCheckPageKeyOrder(pageId, kvStringer.KeyToString, ch) +} + +func verifyPageReachable(p *common.Page, hwm common.Pgid, stack []common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, ch chan error) { + if p.Id() > hwm { + ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(hwm), stack) + } + + // Ensure each page is only referenced once. + for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ { + var id = p.Id() + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.Id()] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.Id())) + } else if !p.IsBranchPage() && !p.IsLeafPage() { + ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack) + } +} + +// recursivelyCheckPageKeyOrder verifies database consistency with respect to b-tree // key order constraints: // - keys on pages must be sorted // - keys on children pages are between 2 consecutive keys on the parent's branch page). 
-func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) { - tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch) +func (tx *Tx) recursivelyCheckPageKeyOrder(pgId common.Pgid, keyToString func([]byte) string, ch chan error) { + tx.recursivelyCheckPageKeyOrderInternal(pgId, nil, nil, nil, keyToString, ch) } -// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are: +// recursivelyCheckPageKeyOrderInternal verifies that all keys in the subtree rooted at `pgid` are: // - >=`minKeyClosed` (can be nil) // - <`maxKeyOpen` (can be nil) // - Are in right ordering relationship to their parents. // `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message. -func (tx *Tx) recursivelyCheckPagesInternal( - pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid, +func (tx *Tx) recursivelyCheckPageKeyOrderInternal( + pgId common.Pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []common.Pgid, keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) { p := tx.page(pgId) pagesStack = append(pagesStack, pgId) switch { - case p.flags&branchPageFlag != 0: + case p.IsBranchPage(): // For branch page we navigate ranges of all subpages. runningMin := minKeyClosed - for i := range p.branchPageElements() { - elem := p.branchPageElement(uint16(i)) - verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + verifyKeyOrder(elem.Pgid(), "branch", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) maxKey := maxKeyOpen - if i < len(p.branchPageElements())-1 { - maxKey = p.branchPageElement(uint16(i + 1)).key() + if i < len(p.BranchPageElements())-1 { + maxKey = p.BranchPageElement(uint16(i + 1)).Key() } - maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch) + maxKeyInSubtree = tx.recursivelyCheckPageKeyOrderInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch) runningMin = maxKeyInSubtree } return maxKeyInSubtree - case p.flags&leafPageFlag != 0: + case p.IsLeafPage(): runningMin := minKeyClosed - for i := range p.leafPageElements() { - elem := p.leafPageElement(uint16(i)) - verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) - runningMin = elem.key() + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + verifyKeyOrder(pgId, "leaf", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + runningMin = elem.Key() } - if p.count > 0 { - return p.leafPageElement(p.count - 1).key() + if p.Count() > 0 { + return p.LeafPageElement(p.Count() - 1).Key() } default: - ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId) + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pgId) } return maxKeyInSubtree } @@ -168,7 +224,7 @@ func (tx *Tx) recursivelyCheckPagesInternal( * verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key", * is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch). 
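The renamed `recursivelyCheckPageKeyOrderInternal` threads a `(minKeyClosed, maxKeyOpen)` window down the tree: each branch key must respect the window, and each child is checked against the narrower window its parent's keys define. A sketch of the same window-threading on a toy tree, where each branch key pairs with one child the way a bbolt branch element pairs a key with a child pgid:

```go
package main

import (
	"bytes"
	"fmt"
)

type tnode struct {
	keys     [][]byte // sorted separator keys (branch) or entries (leaf)
	children []*tnode // nil for leaves; children[i] pairs with keys[i]
}

// checkOrder verifies every key in the subtree is >= min (if non-nil)
// and < max (if non-nil), narrowing the window for each child.
func checkOrder(n *tnode, min, max []byte, bad func(string)) {
	for i, k := range n.keys {
		if min != nil && bytes.Compare(k, min) < 0 {
			bad(fmt.Sprintf("key %q below window min %q", k, min))
		}
		if max != nil && bytes.Compare(k, max) >= 0 {
			bad(fmt.Sprintf("key %q at or above window max %q", k, max))
		}
		if n.children != nil {
			childMax := max
			if i < len(n.keys)-1 {
				childMax = n.keys[i+1]
			}
			checkOrder(n.children[i], k, childMax, bad)
		}
	}
}

func main() {
	leaf := &tnode{keys: [][]byte{[]byte("b"), []byte("z")}} // "z" violates the window
	root := &tnode{keys: [][]byte{[]byte("a")}, children: []*tnode{leaf}}
	checkOrder(root, nil, []byte("m"), func(msg string) { fmt.Println(msg) })
}
```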
*/ -func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) { +func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []common.Pgid) { if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 { ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v", index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) @@ -194,6 +250,7 @@ func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousK type checkConfig struct { kvStringer KVStringer + pageId uint64 } type CheckOption func(options *checkConfig) @@ -204,6 +261,13 @@ func WithKVStringer(kvStringer KVStringer) CheckOption { } } +// WithPageId sets a page ID from which the check command starts to check +func WithPageId(pageId uint64) CheckOption { + return func(c *checkConfig) { + c.pageId = pageId + } +} + // KVStringer allows to prepare human-readable diagnostic messages. type KVStringer interface { KeyToString([]byte) string diff --git a/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go index 16affcd62cf0c..37374c5a7111d 100644 --- a/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go +++ b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go @@ -232,29 +232,30 @@ func init() { func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) } var fileDescriptor_8bbd6f3875b0e874 = []byte{ - // 338 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xea, 0x40, - 0x14, 0xc6, 0x3b, 0xb4, 0x70, 0xdb, 0xc3, 0x85, 0x90, 0x13, 0x72, 0x6f, 0x83, 0x49, 0x6d, 0xba, - 0x6a, 0x5c, 0x54, 0x85, 0x8d, 0x5b, 0x8c, 0x2c, 0x5c, 0x49, 0x26, 0x18, 0x97, 0xa4, 0xa4, 0x13, - 0x24, 0xc0, 0x4c, 0x33, 0x83, 0x31, 0x6c, 0x7c, 0x0e, 0x17, 0x3e, 0x10, 0x4b, 0x1e, 0x41, 0xf0, - 0x45, 0x4c, 0x67, 0xf8, 0x13, 0xa2, 0xbb, 0xef, 0x7c, 0xe7, 0xfb, 0x66, 0x7e, 0x99, 0x01, 0x48, - 0x5f, 0x16, 0xcf, 0x49, 0x2e, 0xc5, 0x42, 0x60, 0xa5, 0xd0, 0xf9, 0xa8, 0xd5, 0x1c, 0x8b, 0xb1, - 0xd0, 0xd6, 0x65, 0xa1, 0xcc, 0x36, 0xba, 0x86, 0xfa, 0xa3, 0x62, 0xb2, 0x9b, 0x65, 0x0f, 0xf9, - 0x62, 0x22, 0xb8, 0xc2, 0x73, 0xa8, 0x72, 0x31, 0xcc, 0x53, 0xa5, 0x5e, 0x85, 0xcc, 0x7c, 0x12, - 0x92, 0xd8, 0xa5, 0xc0, 0x45, 0x7f, 0xe7, 0x44, 0x6f, 0xe0, 0x14, 0x15, 0x44, 0x70, 0x78, 0x3a, - 0x67, 0x3a, 0xf1, 0x97, 0x6a, 0x8d, 0x2d, 0x70, 0x0f, 0xcd, 0x92, 0xf6, 0x0f, 0x33, 0x36, 0xa1, - 0x2c, 0xc5, 0x8c, 0x29, 0xdf, 0x0e, 0xed, 0xd8, 0xa3, 0x66, 0xc0, 0x2b, 0xf8, 0x23, 0xcc, 0xcd, - 0xbe, 0x13, 0x92, 0xb8, 0xda, 0xfe, 0x97, 0x18, 0xe0, 0xe4, 0x94, 0x8b, 0xee, 0x63, 0xd1, 0x07, - 0x01, 0xe8, 0x33, 0x39, 0x9f, 0x28, 0x35, 0x11, 0x1c, 0x3b, 0xe0, 0xe6, 0x4c, 0xce, 0x07, 0xcb, - 0xdc, 0xa0, 0xd4, 0xdb, 0xff, 0xf7, 0x27, 0x1c, 0x53, 0x49, 0xb1, 0xa6, 0x87, 0x20, 0x36, 0xc0, - 0x9e, 0xb2, 0xe5, 0x0e, 0xb1, 0x90, 0x78, 0x06, 0x9e, 0x4c, 0xf9, 0x98, 0x0d, 0x19, 0xcf, 0x7c, - 0xdb, 0xa0, 0x6b, 0xa3, 0xc7, 0xb3, 0xe8, 0x02, 0x1c, 0x5d, 0x73, 0xc1, 0xa1, 0xbd, 0xee, 0x5d, - 0xc3, 0x42, 0x0f, 0xca, 0x4f, 0xf4, 0x7e, 0xd0, 0x6b, 0x10, 0xac, 0x81, 0x57, 0x98, 0x66, 0x2c, - 0x45, 0x03, 0x70, 0xa8, 0x98, 0xb1, 0x5f, 0x9f, 0xe7, 0x06, 0x6a, 0x53, 0xb6, 0x3c, 0x62, 0xf9, - 0xa5, 0xd0, 0x8e, 0xab, 0x6d, 0xfc, 0x09, 0x4c, 0x4f, 0x83, 0xb7, 
0xfe, 0x6a, 0x13, 0x58, 0xeb, - 0x4d, 0x60, 0xad, 0xb6, 0x01, 0x59, 0x6f, 0x03, 0xf2, 0xb9, 0x0d, 0xc8, 0xfb, 0x57, 0x60, 0x8d, - 0x2a, 0xfa, 0x23, 0x3b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x66, 0xc6, 0x9d, 0xf4, 0x01, - 0x00, 0x00, + // 359 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xc2, 0x40, + 0x10, 0xc6, 0xbb, 0xb4, 0x60, 0x3b, 0x08, 0x21, 0x1b, 0xa2, 0x0d, 0xc6, 0xda, 0xf4, 0xd4, 0x78, + 0x68, 0x15, 0x0e, 0x7a, 0xc5, 0xc8, 0xc1, 0x93, 0x64, 0x83, 0x31, 0xf1, 0x42, 0x8a, 0xdd, 0xd4, + 0x06, 0xd8, 0x6d, 0xda, 0xaa, 0xe1, 0xe2, 0x73, 0x78, 0xf0, 0x81, 0x38, 0xf2, 0x08, 0x82, 0x2f, + 0x62, 0xba, 0xcb, 0x9f, 0x10, 0x3d, 0xed, 0x37, 0xdf, 0x7c, 0x33, 0xfb, 0xcb, 0x2e, 0x40, 0xf0, + 0x9a, 0xbf, 0x78, 0x49, 0xca, 0x73, 0x8e, 0x2b, 0x85, 0x4e, 0x46, 0xad, 0x66, 0xc4, 0x23, 0x2e, + 0x2c, 0xbf, 0x50, 0xb2, 0xeb, 0x5c, 0x42, 0xfd, 0x21, 0xa3, 0x69, 0x37, 0x0c, 0xef, 0x93, 0x3c, + 0xe6, 0x2c, 0xc3, 0x67, 0x50, 0x65, 0x7c, 0x98, 0x04, 0x59, 0xf6, 0xce, 0xd3, 0xd0, 0x44, 0x36, + 0x72, 0x75, 0x02, 0x8c, 0xf7, 0xd7, 0x8e, 0xf3, 0x01, 0x5a, 0x31, 0x82, 0x31, 0x68, 0x2c, 0x98, + 0x52, 0x91, 0x38, 0x24, 0x42, 0xe3, 0x16, 0xe8, 0xdb, 0xc9, 0x92, 0xf0, 0xb7, 0x35, 0x6e, 0x42, + 0x39, 0xe5, 0x13, 0x9a, 0x99, 0xaa, 0xad, 0xba, 0x06, 0x91, 0x05, 0xbe, 0x80, 0x03, 0x2e, 0x6f, + 0x36, 0x35, 0x1b, 0xb9, 0xd5, 0xf6, 0x91, 0x27, 0x81, 0xbd, 0x7d, 0x2e, 0xb2, 0x89, 0x39, 0x5f, + 0x08, 0xa0, 0x4f, 0xd3, 0x69, 0x9c, 0x65, 0x31, 0x67, 0xb8, 0x03, 0x7a, 0x42, 0xd3, 0xe9, 0x60, + 0x96, 0x48, 0x94, 0x7a, 0xfb, 0x78, 0xb3, 0x61, 0x97, 0xf2, 0x8a, 0x36, 0xd9, 0x06, 0x71, 0x03, + 0xd4, 0x31, 0x9d, 0xad, 0x11, 0x0b, 0x89, 0x4f, 0xc0, 0x48, 0x03, 0x16, 0xd1, 0x21, 0x65, 0xa1, + 0xa9, 0x4a, 0x74, 0x61, 0xf4, 0x58, 0xe8, 0x9c, 0x83, 0x26, 0xc6, 0x74, 0xd0, 0x48, 0xaf, 0x7b, + 0xdb, 0x50, 0xb0, 0x01, 0xe5, 0x47, 0x72, 0x37, 0xe8, 0x35, 0x10, 0xae, 0x81, 0x51, 0x98, 0xb2, + 0x2c, 0x39, 0x03, 0xd0, 0x08, 0x9f, 0xd0, 0x7f, 0x9f, 0xe7, 0x1a, 0x6a, 0x63, 0x3a, 0xdb, 0x61, + 0x99, 0x25, 0x5b, 0x75, 0xab, 0x6d, 0xfc, 0x17, 0x98, 0xec, 0x07, 0x6f, 0xae, 0xe6, 0x4b, 0x4b, + 0x59, 0x2c, 0x2d, 0x65, 0xbe, 0xb2, 0xd0, 0x62, 0x65, 0xa1, 0xef, 0x95, 0x85, 0x3e, 0x7f, 0x2c, + 0xe5, 0xe9, 0x34, 0xe2, 0x1e, 0xcd, 0x9f, 0x43, 0x2f, 0xe6, 0x7e, 0x71, 0xfa, 0x41, 0x12, 0xfb, + 0x6f, 0x1d, 0x5f, 0xae, 0x1c, 0x55, 0xc4, 0x3f, 0x77, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x61, + 0x5a, 0xfe, 0x48, 0x13, 0x02, 0x00, 0x00, } func (m *UserAddOptions) Marshal() (dAtA []byte, err error) { diff --git a/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto index 8f82b7cf1e429..5a7856bb73dc3 100644 --- a/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto +++ b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto @@ -3,6 +3,8 @@ package authpb; import "gogoproto/gogo.proto"; +option go_package = "go.etcd.io/etcd/api/v3/authpb"; + option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go index 38434d09c564d..eaefa2d20888e 100644 --- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go @@ -129,31 +129,33 @@ func init() { func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) } var fileDescriptor_09ffbeb3bebbce7e = []byte{ - // 380 bytes of a 
gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, - 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, - 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, - 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, - 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, - 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, - 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, - 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, - 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, - 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, - 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, - 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, - 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, - 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, - 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, - 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, - 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, - 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, - 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, - 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, - 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, - 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, - 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, - 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, + // 402 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0x41, 0xef, 0xd2, 0x30, + 0x14, 0x00, 0x70, 0x0a, 0xfb, 0xff, 0x81, 0x8a, 0x8a, 0x0d, 0x31, 0x2f, 0xc4, 0xcc, 0x05, 0x3d, + 0xec, 0xc4, 0x0e, 0x9e, 0xbc, 0xe2, 0x38, 0x2c, 0x11, 0x83, 0xc3, 0x60, 0xe2, 0xad, 0xb2, 0x27, + 0x34, 0x01, 0x3a, 0xbb, 0x6e, 0xe1, 0x1b, 0xf8, 0x15, 0xfc, 0x48, 0x1c, 0xfd, 0x04, 0x46, 0xf1, + 0x8b, 0x98, 0x8e, 0x8d, 0x55, 0x4f, 0x5b, 0x7e, 0xef, 0xf5, 0xf5, 0xb5, 0x7d, 0x74, 0x88, 0x7a, + 0x93, 0x64, 0xa8, 0x0a, 0x54, 0xd3, 0x54, 0x49, 0x2d, 0xd9, 0xa0, 0x91, 0xf4, 0xf3, 0x78, 0xb4, + 0x95, 0x5b, 0x59, 0x06, 0x02, 0xf3, 0x77, 0xcd, 0x99, 0x7c, 0x73, 0x68, 0x37, 0xc6, 0xaf, 0x39, + 0x66, 0x9a, 0x8d, 0x68, 0x3b, 0x0a, 0x81, 0x78, 0xc4, 0x77, 0x66, 0xce, 0xf9, 0xe7, 0xf3, 0x56, + 0xdc, 0x8e, 0x42, 0xf6, 0x8c, 0xde, 0x2f, 0x50, 0xef, 0x64, 0x02, 0x6d, 0x8f, 0xf8, 0xfd, 0x2a, + 0x52, 0x19, 0x03, 0xea, 0x2c, 0xb9, 0xde, 0x41, 0xc7, 0x8a, 0x95, 0xc2, 0x9e, 0xd2, 0xce, 0x9a, + 0xef, 0xc1, 0xb1, 0x02, 0x06, 0x8c, 0x87, 0x42, 0xc1, 0x9d, 0x47, 0xfc, 0x5e, 0xed, 0xa1, 0x50, + 0x6c, 0x42, 0xfb, 0x4b, 0x85, 0xc5, 0x9a, 0xef, 0x73, 0x84, 0x7b, 
0x6b, 0x55, 0xc3, 0x75, 0x4e, + 0x74, 0x4c, 0xf0, 0x04, 0x5d, 0xab, 0xd1, 0x86, 0xeb, 0x9c, 0xf9, 0x49, 0x64, 0x1a, 0x7a, 0xb7, + 0x5d, 0x48, 0xdc, 0x30, 0x7b, 0x49, 0xe9, 0xfc, 0x94, 0x0a, 0xc5, 0xb5, 0x90, 0x47, 0xe8, 0x7b, + 0xc4, 0xef, 0x54, 0x85, 0x2c, 0x37, 0x67, 0xfb, 0xc8, 0x85, 0x06, 0x6a, 0xb5, 0x5a, 0x0a, 0x1b, + 0xd3, 0xbb, 0x95, 0x38, 0x6e, 0x10, 0x1e, 0x58, 0x3d, 0x5c, 0xc9, 0xec, 0x1f, 0xe3, 0x26, 0x57, + 0x99, 0x28, 0x10, 0x06, 0xd6, 0xd2, 0x86, 0xcd, 0x9d, 0xae, 0xa4, 0xd2, 0x98, 0xc0, 0x43, 0x2b, + 0xa1, 0x32, 0x13, 0x7d, 0x9f, 0x4b, 0x95, 0x1f, 0xe0, 0x91, 0x1d, 0xbd, 0x9a, 0xe9, 0xea, 0x83, + 0x38, 0x20, 0x3c, 0xb6, 0xba, 0x2e, 0xa5, 0xac, 0xaa, 0x15, 0xf2, 0x03, 0x0c, 0xff, 0xa9, 0x5a, + 0x1a, 0x73, 0xcd, 0x43, 0x7f, 0x51, 0x98, 0xed, 0xe0, 0x89, 0x75, 0x2b, 0x35, 0x4e, 0xde, 0xd2, + 0xde, 0x02, 0x35, 0x4f, 0xb8, 0xe6, 0xa6, 0xd2, 0x3b, 0x99, 0xe0, 0x7f, 0xd3, 0x50, 0x99, 0x39, + 0xe1, 0x9b, 0x7d, 0x9e, 0x69, 0x54, 0x51, 0x58, 0x0e, 0xc5, 0xed, 0x15, 0x6e, 0x3c, 0x7b, 0x7d, + 0xfe, 0xed, 0xb6, 0xce, 0x17, 0x97, 0xfc, 0xb8, 0xb8, 0xe4, 0xd7, 0xc5, 0x25, 0xdf, 0xff, 0xb8, + 0xad, 0x4f, 0x2f, 0xb6, 0x72, 0x6a, 0x86, 0x72, 0x2a, 0x64, 0x60, 0xbe, 0x01, 0x4f, 0x45, 0x50, + 0xbc, 0x0a, 0xec, 0x41, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x79, 0xf9, 0xf5, 0xc9, 0x02, + 0x00, 0x00, } func (m *Request) Marshal() (dAtA []byte, err error) { diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto index 25e0aca5d9f76..ff639b9c96c7e 100644 --- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto @@ -3,6 +3,8 @@ package etcdserverpb; import "gogoproto/gogo.proto"; +option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb"; + option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/gw/rpc.pb.gw.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/gw/rpc.pb.gw.go index 62aba97cdb82a..30a0174cd7a8a 100644 --- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/gw/rpc.pb.gw.go +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/gw/rpc.pb.gw.go @@ -9,19 +9,21 @@ It translates gRPC into RESTful JSON APIs. 
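The gateway rewrite that follows decodes request bodies into protobuf/v1 message structs through `protov1.MessageV2`, which adapts a v1-style generated message to the v2 API that the grpc-gateway v2 runtime expects. A minimal sketch of that bridge, assuming both the `github.com/golang/protobuf` and `google.golang.org/protobuf` modules are on the module path and using a well-known wrapper type so it compiles without generated code:

```go
package main

import (
	"fmt"

	protov1 "github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
	"google.golang.org/protobuf/encoding/protojson"
)

func main() {
	// A message reachable through the protobuf/v1 package path.
	var msg wrappers.StringValue

	// protov1.MessageV2 adapts it to the v2 proto.Message interface, so
	// v2-only helpers such as protojson (or the gateway v2 marshaler
	// in the hunks below) can decode straight into it.
	if err := protojson.Unmarshal([]byte(`"hello"`), protov1.MessageV2(&msg)); err != nil {
		panic(err)
	}
	fmt.Println(msg.GetValue()) // hello
}
```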
package gw import ( + protov1 "github.com/golang/protobuf/proto" + "context" "go.etcd.io/etcd/api/v3/etcdserverpb" "io" "net/http" - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // Suppress "imported and not used" errors @@ -30,22 +32,18 @@ var _ io.Reader var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage +var _ = metadata.Join func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq etcdserverpb.RangeRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -53,16 +51,12 @@ func local_request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq etcdserverpb.RangeRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Range(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -70,16 +64,12 @@ func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client e var protoReq etcdserverpb.PutRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -87,16 +77,12 @@ func local_request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, se var protoReq etcdserverpb.PutRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", 
berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Put(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -104,16 +90,12 @@ func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq etcdserverpb.DeleteRangeRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -121,16 +103,12 @@ func local_request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marsh var protoReq etcdserverpb.DeleteRangeRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.DeleteRange(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -138,16 +116,12 @@ func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client e var protoReq etcdserverpb.TxnRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -155,16 +129,12 @@ func local_request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, se var protoReq etcdserverpb.TxnRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Txn(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -172,16 +142,12 @@ func request_KV_Compact_0(ctx context.Context, marshaler 
runtime.Marshaler, clie var protoReq etcdserverpb.CompactionRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -189,85 +155,72 @@ func local_request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler var protoReq etcdserverpb.CompactionRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Compact(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } -func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) { +func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, chan error, error) { var metadata runtime.ServerMetadata + errChan := make(chan error, 1) stream, err := client.Watch(ctx) if err != nil { - grpclog.Infof("Failed to start streaming: %v", err) - return nil, metadata, err + grpclog.Errorf("Failed to start streaming: %v", err) + close(errChan) + return nil, metadata, errChan, err } dec := marshaler.NewDecoder(req.Body) handleSend := func() error { var protoReq etcdserverpb.WatchRequest - err := dec.Decode(&protoReq) + err := dec.Decode(protov1.MessageV2(&protoReq)) if err == io.EOF { return err } if err != nil { - grpclog.Infof("Failed to decode request: %v", err) - return err + grpclog.Errorf("Failed to decode request: %v", err) + return status.Errorf(codes.InvalidArgument, "Failed to decode request: %v", err) } if err := stream.Send(&protoReq); err != nil { - grpclog.Infof("Failed to send request: %v", err) + grpclog.Errorf("Failed to send request: %v", err) return err } return nil } - if err := handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Infof("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } go func() { + defer close(errChan) for { if err := handleSend(); err != nil { + errChan <- err break } } if err := stream.CloseSend(); err != nil { - grpclog.Infof("Failed to terminate client stream: %v", err) + grpclog.Errorf("Failed to terminate client stream: %v", err) } }() header, err := stream.Header() if err != nil { - grpclog.Infof("Failed to get header from client: %v", err) - return nil, metadata, err + grpclog.Errorf("Failed to get header from 
client: %v", err) + return nil, metadata, errChan, err } metadata.HeaderMD = header - return stream, metadata, nil + return stream, metadata, errChan, nil } func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq etcdserverpb.LeaseGrantRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -275,16 +228,12 @@ func local_request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Mar var protoReq etcdserverpb.LeaseGrantRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.LeaseGrant(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -292,16 +241,12 @@ func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.LeaseRevokeRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -309,16 +254,12 @@ func local_request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Ma var protoReq etcdserverpb.LeaseRevokeRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.LeaseRevoke(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -326,16 +267,12 @@ func request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.LeaseRevokeRequest var metadata runtime.ServerMetadata - newReader, 
berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -343,85 +280,72 @@ func local_request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Ma var protoReq etcdserverpb.LeaseRevokeRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.LeaseRevoke(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } -func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { +func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, chan error, error) { var metadata runtime.ServerMetadata + errChan := make(chan error, 1) stream, err := client.LeaseKeepAlive(ctx) if err != nil { - grpclog.Infof("Failed to start streaming: %v", err) - return nil, metadata, err + grpclog.Errorf("Failed to start streaming: %v", err) + close(errChan) + return nil, metadata, errChan, err } dec := marshaler.NewDecoder(req.Body) handleSend := func() error { var protoReq etcdserverpb.LeaseKeepAliveRequest - err := dec.Decode(&protoReq) + err := dec.Decode(protov1.MessageV2(&protoReq)) if err == io.EOF { return err } if err != nil { - grpclog.Infof("Failed to decode request: %v", err) - return err + grpclog.Errorf("Failed to decode request: %v", err) + return status.Errorf(codes.InvalidArgument, "Failed to decode request: %v", err) } if err := stream.Send(&protoReq); err != nil { - grpclog.Infof("Failed to send request: %v", err) + grpclog.Errorf("Failed to send request: %v", err) return err } return nil } - if err := handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Infof("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } go func() { + defer close(errChan) for { if err := handleSend(); err != nil { + errChan <- err break } } if err := stream.CloseSend(); err != nil { - grpclog.Infof("Failed to terminate client stream: %v", err) + grpclog.Errorf("Failed to terminate client stream: %v", err) } }() header, err := stream.Header() if err != nil { - grpclog.Infof("Failed to get header from client: %v", err) - return nil, metadata, err + grpclog.Errorf("Failed to get header from client: %v", err) + return nil, metadata, errChan, err } 
metadata.HeaderMD = header - return stream, metadata, nil + return stream, metadata, errChan, nil } func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq etcdserverpb.LeaseTimeToLiveRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -429,16 +353,12 @@ func local_request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtim var protoReq etcdserverpb.LeaseTimeToLiveRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.LeaseTimeToLive(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -446,16 +366,12 @@ func request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtime.Mars var protoReq etcdserverpb.LeaseTimeToLiveRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -463,16 +379,12 @@ func local_request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtim var protoReq etcdserverpb.LeaseTimeToLiveRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.LeaseTimeToLive(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -480,16 +392,12 @@ func request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.LeaseLeasesRequest var metadata runtime.ServerMetadata - newReader, berr := 
utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -497,16 +405,12 @@ func local_request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Ma var protoReq etcdserverpb.LeaseLeasesRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.LeaseLeases(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -514,16 +418,12 @@ func request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.LeaseLeasesRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -531,16 +431,12 @@ func local_request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Ma var protoReq etcdserverpb.LeaseLeasesRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.LeaseLeases(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -548,16 +444,12 @@ func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.MemberAddRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberAdd(ctx, &protoReq, 
grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -565,16 +457,12 @@ func local_request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Ma var protoReq etcdserverpb.MemberAddRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.MemberAdd(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -582,16 +470,12 @@ func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marsh var protoReq etcdserverpb.MemberRemoveRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -599,16 +483,12 @@ func local_request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime var protoReq etcdserverpb.MemberRemoveRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.MemberRemove(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -616,16 +496,12 @@ func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marsh var protoReq etcdserverpb.MemberUpdateRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -633,16 +509,12 @@ func local_request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime var protoReq etcdserverpb.MemberUpdateRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.MemberUpdate(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -650,16 +522,12 @@ func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.MemberListRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -667,16 +535,12 @@ func local_request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.M var protoReq etcdserverpb.MemberListRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.MemberList(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -684,16 +548,12 @@ func request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtime.Mars var protoReq etcdserverpb.MemberPromoteRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberPromote(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -701,16 +561,12 @@ func local_request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtim var protoReq etcdserverpb.MemberPromoteRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.MemberPromote(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ 
-718,16 +574,12 @@ func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.AlarmRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -735,16 +587,12 @@ func local_request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Ma var protoReq etcdserverpb.AlarmRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Alarm(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -752,16 +600,12 @@ func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.StatusRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -769,16 +613,12 @@ func local_request_Maintenance_Status_0(ctx context.Context, marshaler runtime.M var protoReq etcdserverpb.StatusRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Status(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -786,16 +626,12 @@ func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Mar var protoReq etcdserverpb.DefragmentRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := 
marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -803,16 +639,12 @@ func local_request_Maintenance_Defragment_0(ctx context.Context, marshaler runti var protoReq etcdserverpb.DefragmentRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Defragment(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -820,16 +652,12 @@ func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler var protoReq etcdserverpb.HashRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -837,16 +665,12 @@ func local_request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Mar var protoReq etcdserverpb.HashRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Hash(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -854,16 +678,12 @@ func request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.HashKVRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.HashKV(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -871,16 +691,12 @@ func local_request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.M var protoReq 
etcdserverpb.HashKVRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.HashKV(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -888,11 +704,7 @@ func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marsh var protoReq etcdserverpb.SnapshotRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -913,16 +725,12 @@ func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Mar var protoReq etcdserverpb.MoveLeaderRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MoveLeader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -930,16 +738,12 @@ func local_request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runti var protoReq etcdserverpb.MoveLeaderRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.MoveLeader(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -947,16 +751,12 @@ func request_Maintenance_Downgrade_0(ctx context.Context, marshaler runtime.Mars var protoReq etcdserverpb.DowngradeRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Downgrade(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), 
metadata, err } @@ -964,16 +764,12 @@ func local_request_Maintenance_Downgrade_0(ctx context.Context, marshaler runtim var protoReq etcdserverpb.DowngradeRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Downgrade(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -981,16 +777,12 @@ func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq etcdserverpb.AuthEnableRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -998,16 +790,12 @@ func local_request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Mars var protoReq etcdserverpb.AuthEnableRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.AuthEnable(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1015,16 +803,12 @@ func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler var protoReq etcdserverpb.AuthDisableRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1032,16 +816,12 @@ func local_request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Mar var protoReq etcdserverpb.AuthDisableRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := 
marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.AuthDisable(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1049,16 +829,12 @@ func request_Auth_AuthStatus_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq etcdserverpb.AuthStatusRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1066,16 +842,12 @@ func local_request_Auth_AuthStatus_0(ctx context.Context, marshaler runtime.Mars var protoReq etcdserverpb.AuthStatusRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.AuthStatus(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1083,16 +855,12 @@ func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.AuthenticateRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1100,16 +868,12 @@ func local_request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Ma var protoReq etcdserverpb.AuthenticateRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Authenticate(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1117,16 +881,12 @@ func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl var protoReq etcdserverpb.AuthUserAddRequest var metadata 
runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1134,16 +894,12 @@ func local_request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.AuthUserAddRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.UserAdd(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1151,16 +907,12 @@ func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, cl var protoReq etcdserverpb.AuthUserGetRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1168,16 +920,12 @@ func local_request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.AuthUserGetRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.UserGet(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1185,16 +933,12 @@ func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, c var protoReq etcdserverpb.AuthUserListRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := 
client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1202,16 +946,12 @@ func local_request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marsha var protoReq etcdserverpb.AuthUserListRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.UserList(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1219,16 +959,12 @@ func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq etcdserverpb.AuthUserDeleteRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1236,16 +972,12 @@ func local_request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Mars var protoReq etcdserverpb.AuthUserDeleteRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.UserDelete(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1253,16 +985,12 @@ func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Ma var protoReq etcdserverpb.AuthUserChangePasswordRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1270,16 +998,12 @@ func local_request_Auth_UserChangePassword_0(ctx context.Context, marshaler runt var protoReq etcdserverpb.AuthUserChangePasswordRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if 
berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.UserChangePassword(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1287,16 +1011,12 @@ func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.AuthUserGrantRoleRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1304,16 +1024,12 @@ func local_request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.M var protoReq etcdserverpb.AuthUserGrantRoleRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.UserGrantRole(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1321,16 +1037,12 @@ func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marsha var protoReq etcdserverpb.AuthUserRevokeRoleRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1338,16 +1050,12 @@ func local_request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime. 
var protoReq etcdserverpb.AuthUserRevokeRoleRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.UserRevokeRole(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1355,16 +1063,12 @@ func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl var protoReq etcdserverpb.AuthRoleAddRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1372,16 +1076,12 @@ func local_request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.AuthRoleAddRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.RoleAdd(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1389,16 +1089,12 @@ func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, cl var protoReq etcdserverpb.AuthRoleGetRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1406,16 +1102,12 @@ func local_request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.AuthRoleGetRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.RoleGet(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1423,16 +1115,12 @@ func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, c var protoReq etcdserverpb.AuthRoleListRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1440,16 +1128,12 @@ func local_request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marsha var protoReq etcdserverpb.AuthRoleListRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.RoleList(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1457,16 +1141,12 @@ func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq etcdserverpb.AuthRoleDeleteRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1474,16 +1154,12 @@ func local_request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Mars var protoReq etcdserverpb.AuthRoleDeleteRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.RoleDelete(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1491,16 +1167,12 @@ func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.M var protoReq etcdserverpb.AuthRoleGrantPermissionRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { 
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1508,16 +1180,12 @@ func local_request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler run var protoReq etcdserverpb.AuthRoleGrantPermissionRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.RoleGrantPermission(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1525,16 +1193,12 @@ func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime. var protoReq etcdserverpb.AuthRoleRevokePermissionRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -1542,121 +1206,144 @@ func local_request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler ru var protoReq etcdserverpb.AuthRoleRevokePermissionRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.RoleRevokePermission(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } // etcdserverpb.RegisterKVHandlerServer registers the http handlers for service KV to "mux". // UnaryRPC :call etcdserverpb.KVServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterKVHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
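
The hunks above show the first of the recurring grpc-gateway v2 changes in this regenerated file: the request/response helpers no longer buffer the body through utilities.IOReaderFactory, they decode req.Body exactly once (tolerating io.EOF for empty bodies), and every message crosses the protobuf v1/v2 boundary through protov1.MessageV2, the alias this file uses for github.com/golang/protobuf/proto. A minimal, self-contained sketch of the new shape, with the package name and import paths assumed for illustration:

package gwsketch

import (
	"context"
	"io"
	"net/http"

	protov1 "github.com/golang/protobuf/proto"
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"go.etcd.io/etcd/api/v3/etcdserverpb"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
)

// localRequestRoleList mirrors the generated local_request_Auth_RoleList_0:
// decode straight from the body, call the server in-process, and adapt the
// v1 messages to the v2 interface the grpc-gateway v2 runtime expects.
func localRequestRoleList(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq etcdserverpb.AuthRoleListRequest
	var metadata runtime.ServerMetadata
	// io.EOF is tolerated so an empty POST body decodes to the zero-value request.
	if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	msg, err := server.RoleList(ctx, &protoReq)
	return protov1.MessageV2(msg), metadata, err
}

The client-side request_* variants in the same hunks differ only in calling the stub with the grpc.Header(&metadata.HeaderMD) and grpc.Trailer(&metadata.TrailerMD) call options, so the upstream server's metadata is captured for the HTTP response.
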
func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.KVServer) error { mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Range", runtime.WithHTTPPathPattern("/v3/kv/range")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_KV_Range_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_KV_Range_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Range_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Put", runtime.WithHTTPPathPattern("/v3/kv/put")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_KV_Put_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_KV_Put_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Put_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/DeleteRange", runtime.WithHTTPPathPattern("/v3/kv/deleterange")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_KV_DeleteRange_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_KV_DeleteRange_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_DeleteRange_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Txn", runtime.WithHTTPPathPattern("/v3/kv/txn")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_KV_Txn_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_KV_Txn_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Txn_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Compact", runtime.WithHTTPPathPattern("/v3/kv/compaction")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_KV_Compact_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_KV_Compact_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Compact_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1666,6 +1353,8 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server // etcdserverpb.RegisterWatchHandlerServer registers the http handlers for service Watch to "mux". // UnaryRPC :call etcdserverpb.WatchServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterWatchHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. func RegisterWatchHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.WatchServer) error { mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -1681,65 +1370,82 @@ func RegisterWatchHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv // etcdserverpb.RegisterLeaseHandlerServer registers the http handlers for service Lease to "mux". // UnaryRPC :call etcdserverpb.LeaseServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLeaseHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.LeaseServer) error { mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseGrant", runtime.WithHTTPPathPattern("/v3/lease/grant")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Lease_LeaseGrant_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Lease_LeaseGrant_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseGrant_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/lease/revoke")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Lease_LeaseRevoke_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseRevoke_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/kv/lease/revoke")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Lease_LeaseRevoke_1(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Lease_LeaseRevoke_1(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseRevoke_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseRevoke_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1753,80 +1459,100 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/lease/timetolive")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Lease_LeaseTimeToLive_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseTimeToLive_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/kv/lease/timetolive")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Lease_LeaseTimeToLive_1(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Lease_LeaseTimeToLive_1(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseTimeToLive_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseTimeToLive_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/lease/leases")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Lease_LeaseLeases_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Lease_LeaseLeases_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseLeases_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/kv/lease/leases")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Lease_LeaseLeases_1(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Lease_LeaseLeases_1(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseLeases_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseLeases_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1836,105 +1562,132 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv // etcdserverpb.RegisterClusterHandlerServer registers the http handlers for service Cluster to "mux". // UnaryRPC :call etcdserverpb.ClusterServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterClusterHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.ClusterServer) error { mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberAdd", runtime.WithHTTPPathPattern("/v3/cluster/member/add")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Cluster_MemberAdd_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Cluster_MemberAdd_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberRemove", runtime.WithHTTPPathPattern("/v3/cluster/member/remove")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Cluster_MemberRemove_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Cluster_MemberRemove_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberRemove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberUpdate", runtime.WithHTTPPathPattern("/v3/cluster/member/update")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Cluster_MemberUpdate_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberUpdate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberList", runtime.WithHTTPPathPattern("/v3/cluster/member/list")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Cluster_MemberList_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Cluster_MemberList_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberPromote", runtime.WithHTTPPathPattern("/v3/cluster/member/promote")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Cluster_MemberPromote_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Cluster_MemberPromote_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberPromote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberPromote_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1944,105 +1697,132 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se // etcdserverpb.RegisterMaintenanceHandlerServer registers the http handlers for service Maintenance to "mux". // UnaryRPC :call etcdserverpb.MaintenanceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMaintenanceHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.MaintenanceServer) error { mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Alarm", runtime.WithHTTPPathPattern("/v3/maintenance/alarm")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Maintenance_Alarm_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Maintenance_Alarm_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Alarm_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Status", runtime.WithHTTPPathPattern("/v3/maintenance/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Maintenance_Status_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Maintenance_Status_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Status_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Defragment", runtime.WithHTTPPathPattern("/v3/maintenance/defragment")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Maintenance_Defragment_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Maintenance_Defragment_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Defragment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Hash", runtime.WithHTTPPathPattern("/v3/maintenance/hash")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Maintenance_Hash_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Maintenance_Hash_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Hash_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/HashKV", runtime.WithHTTPPathPattern("/v3/maintenance/hashkv")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Maintenance_HashKV_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Maintenance_HashKV_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_HashKV_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_HashKV_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -2056,40 +1836,50 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux mux.Handle("POST", pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/MoveLeader", runtime.WithHTTPPathPattern("/v3/maintenance/transfer-leadership")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Maintenance_MoveLeader_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Maintenance_MoveLeader_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_MoveLeader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_MoveLeader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Maintenance_Downgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Downgrade", runtime.WithHTTPPathPattern("/v3/maintenance/downgrade")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Maintenance_Downgrade_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Maintenance_Downgrade_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Downgrade_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Downgrade_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -2099,345 +1889,432 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux // etcdserverpb.RegisterAuthHandlerServer registers the http handlers for service Auth to "mux". // UnaryRPC :call etcdserverpb.AuthServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAuthHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.AuthServer) error { mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/AuthEnable", runtime.WithHTTPPathPattern("/v3/auth/enable")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_AuthEnable_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_AuthEnable_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthEnable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/AuthDisable", runtime.WithHTTPPathPattern("/v3/auth/disable")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_AuthDisable_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_AuthDisable_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthDisable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Auth_AuthStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/AuthStatus", runtime.WithHTTPPathPattern("/v3/auth/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_AuthStatus_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_AuthStatus_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/Authenticate", runtime.WithHTTPPathPattern("/v3/auth/authenticate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_Authenticate_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_Authenticate_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_Authenticate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserAdd", runtime.WithHTTPPathPattern("/v3/auth/user/add")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_UserAdd_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_UserAdd_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserGet", runtime.WithHTTPPathPattern("/v3/auth/user/get")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_UserGet_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_UserGet_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserList", runtime.WithHTTPPathPattern("/v3/auth/user/list")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_UserList_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_UserList_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserDelete", runtime.WithHTTPPathPattern("/v3/auth/user/delete")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_UserDelete_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_UserDelete_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserChangePassword", runtime.WithHTTPPathPattern("/v3/auth/user/changepw")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_UserChangePassword_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_UserChangePassword_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserChangePassword_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserGrantRole", runtime.WithHTTPPathPattern("/v3/auth/user/grant")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_UserGrantRole_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_UserGrantRole_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGrantRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserRevokeRole", runtime.WithHTTPPathPattern("/v3/auth/user/revoke")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_UserRevokeRole_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserRevokeRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleAdd", runtime.WithHTTPPathPattern("/v3/auth/role/add")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_RoleAdd_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_RoleAdd_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGet", runtime.WithHTTPPathPattern("/v3/auth/role/get")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_RoleGet_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_RoleGet_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleList", runtime.WithHTTPPathPattern("/v3/auth/role/list")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_RoleList_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_RoleList_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleDelete", runtime.WithHTTPPathPattern("/v3/auth/role/delete")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_RoleDelete_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_RoleDelete_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGrantPermission", runtime.WithHTTPPathPattern("/v3/auth/role/grant")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_RoleGrantPermission_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGrantPermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleRevokePermission", runtime.WithHTTPPathPattern("/v3/auth/role/revoke")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Auth_RoleRevokePermission_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleRevokePermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -2447,21 +2324,21 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve // RegisterKVHandlerFromEndpoint is same as RegisterKVHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() @@ -2479,26 +2356,28 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "KVClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "KVClient" to call the correct interceptors. +// "KVClient" to call the correct interceptors. This client ignores the HTTP middlewares. 
 func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error {
 	mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Range", runtime.WithHTTPPathPattern("/v3/kv/range"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_KV_Range_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_KV_Range_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2506,19 +2385,21 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Put", runtime.WithHTTPPathPattern("/v3/kv/put"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_KV_Put_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_KV_Put_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2526,19 +2407,21 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/DeleteRange", runtime.WithHTTPPathPattern("/v3/kv/deleterange"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_KV_DeleteRange_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_KV_DeleteRange_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2546,19 +2429,21 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Txn", runtime.WithHTTPPathPattern("/v3/kv/txn"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_KV_Txn_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_KV_Txn_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2566,19 +2451,21 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Compact", runtime.WithHTTPPathPattern("/v3/kv/compaction"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_KV_Compact_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_KV_Compact_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2586,15 +2473,15 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
 }
 var (
-	pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "range"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "range"}, ""))
-	pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "put"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "put"}, ""))
-	pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "deleterange"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "deleterange"}, ""))
-	pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "txn"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "txn"}, ""))
-	pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "compaction"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "compaction"}, ""))
 )
 var (
@@ -2612,21 +2499,21 @@ var (
 // RegisterWatchHandlerFromEndpoint is same as RegisterWatchHandler but
 // automatically dials to "endpoint" and closes the connection when "ctx" gets done.
 func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
-	conn, err := grpc.Dial(endpoint, opts...)
+	conn, err := grpc.NewClient(endpoint, opts...)
 	if err != nil {
 		return err
 	}
 	defer func() {
 		if err != nil {
 			if cerr := conn.Close(); cerr != nil {
-				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
 			}
 			return
 		}
 		go func() {
 			<-ctx.Done()
 			if cerr := conn.Close(); cerr != nil {
-				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
 			}
 		}()
 	}()
@@ -2644,26 +2531,39 @@ func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc
 // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient".
 // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient"
 // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "WatchClient" to call the correct interceptors.
+// "WatchClient" to call the correct interceptors. This client ignores the HTTP middlewares.
 func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error {
 	mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Watch/Watch", runtime.WithHTTPPathPattern("/v3/watch"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+
+		resp, md, reqErrChan, err := request_Watch_Watch_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
+		go func() {
+			for err := range reqErrChan {
+				if err != nil && err != io.EOF {
+					runtime.HTTPStreamError(annotatedContext, mux, outboundMarshaler, w, req, err)
+				}
+			}
+		}()
-		forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+		forward_Watch_Watch_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+			m1, err := resp.Recv()
+			return protov1.MessageV2(m1), err
+		}, mux.GetForwardResponseOptions()...)
 	})
@@ -2671,7 +2571,7 @@ func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
 }
 var (
-	pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3", "watch"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3", "watch"}, ""))
 )
 var (
@@ -2681,21 +2581,21 @@ var (
 // RegisterLeaseHandlerFromEndpoint is same as RegisterLeaseHandler but
 // automatically dials to "endpoint" and closes the connection when "ctx" gets done.
 func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
-	conn, err := grpc.Dial(endpoint, opts...)
+	conn, err := grpc.NewClient(endpoint, opts...)
 	if err != nil {
 		return err
 	}
 	defer func() {
 		if err != nil {
 			if cerr := conn.Close(); cerr != nil {
-				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
 			}
 			return
 		}
 		go func() {
 			<-ctx.Done()
 			if cerr := conn.Close(); cerr != nil {
-				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
 			}
 		}()
 	}()
@@ -2713,26 +2613,28 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc
 // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient".
 // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient"
 // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "LeaseClient" to call the correct interceptors.
+// "LeaseClient" to call the correct interceptors. This client ignores the HTTP middlewares.
 func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error {
 	mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseGrant", runtime.WithHTTPPathPattern("/v3/lease/grant"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Lease_LeaseGrant_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
 		}
-		forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Lease_LeaseGrant_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2740,19 +2642,21 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/lease/revoke"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Lease_LeaseRevoke_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Lease_LeaseRevoke_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2760,19 +2664,21 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/kv/lease/revoke"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Lease_LeaseRevoke_1(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Lease_LeaseRevoke_1(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Lease_LeaseRevoke_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Lease_LeaseRevoke_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2780,19 +2686,32 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseKeepAlive", runtime.WithHTTPPathPattern("/v3/lease/keepalive"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+
+		resp, md, reqErrChan, err := request_Lease_LeaseKeepAlive_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
+		go func() {
+			for err := range reqErrChan {
+				if err != nil && err != io.EOF {
+					runtime.HTTPStreamError(annotatedContext, mux, outboundMarshaler, w, req, err)
+				}
+			}
+		}()
-		forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+		forward_Lease_LeaseKeepAlive_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+			m1, err := resp.Recv()
+			return protov1.MessageV2(m1), err
+		}, mux.GetForwardResponseOptions()...)
 	})
@@ -2800,19 +2719,21 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/lease/timetolive"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Lease_LeaseTimeToLive_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Lease_LeaseTimeToLive_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2820,19 +2741,21 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/kv/lease/timetolive"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Lease_LeaseTimeToLive_1(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Lease_LeaseTimeToLive_1(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Lease_LeaseTimeToLive_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Lease_LeaseTimeToLive_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2840,19 +2763,21 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/lease/leases"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Lease_LeaseLeases_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Lease_LeaseLeases_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Lease_LeaseLeases_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2860,19 +2785,21 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/kv/lease/leases"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Lease_LeaseLeases_1(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Lease_LeaseLeases_1(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Lease_LeaseLeases_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Lease_LeaseLeases_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2880,21 +2807,21 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
 }
 var (
-	pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "grant"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "grant"}, ""))
-	pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "revoke"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "revoke"}, ""))
-	pattern_Lease_LeaseRevoke_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "revoke"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Lease_LeaseRevoke_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "revoke"}, ""))
-	pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "keepalive"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "keepalive"}, ""))
-	pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "timetolive"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "timetolive"}, ""))
-	pattern_Lease_LeaseTimeToLive_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "timetolive"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Lease_LeaseTimeToLive_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "timetolive"}, ""))
-	pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "leases"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "leases"}, ""))
-	pattern_Lease_LeaseLeases_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "leases"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Lease_LeaseLeases_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "leases"}, ""))
 )
 var (
@@ -2918,21 +2845,21 @@ var (
 // RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but
 // automatically dials to "endpoint" and closes the connection when "ctx" gets done.
 func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
-	conn, err := grpc.Dial(endpoint, opts...)
+	conn, err := grpc.NewClient(endpoint, opts...)
 	if err != nil {
 		return err
 	}
 	defer func() {
 		if err != nil {
 			if cerr := conn.Close(); cerr != nil {
-				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
 			}
 			return
 		}
 		go func() {
 			<-ctx.Done()
 			if cerr := conn.Close(); cerr != nil {
-				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
 			}
 		}()
 	}()
@@ -2950,26 +2877,28 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr
 // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient".
 // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient"
 // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "ClusterClient" to call the correct interceptors.
+// "ClusterClient" to call the correct interceptors. This client ignores the HTTP middlewares.
 func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error {
 	mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberAdd", runtime.WithHTTPPathPattern("/v3/cluster/member/add"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Cluster_MemberAdd_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Cluster_MemberAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2977,19 +2906,21 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberRemove", runtime.WithHTTPPathPattern("/v3/cluster/member/remove"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Cluster_MemberRemove_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Cluster_MemberRemove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -2997,19 +2928,21 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberUpdate", runtime.WithHTTPPathPattern("/v3/cluster/member/update"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Cluster_MemberUpdate_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Cluster_MemberUpdate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -3017,19 +2950,21 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberList", runtime.WithHTTPPathPattern("/v3/cluster/member/list"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Cluster_MemberList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Cluster_MemberList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -3037,19 +2972,21 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberPromote", runtime.WithHTTPPathPattern("/v3/cluster/member/promote"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Cluster_MemberPromote_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Cluster_MemberPromote_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Cluster_MemberPromote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Cluster_MemberPromote_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -3057,15 +2994,15 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl
 }
 var (
-	pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "add"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "add"}, ""))
-	pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "remove"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "remove"}, ""))
-	pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "update"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "update"}, ""))
-	pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "list"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "list"}, ""))
-	pattern_Cluster_MemberPromote_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "promote"}, "", runtime.AssumeColonVerbOpt(true)))
+	pattern_Cluster_MemberPromote_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "promote"}, ""))
 )
 var (
@@ -3083,21 +3020,21 @@ var (
 // RegisterMaintenanceHandlerFromEndpoint is same as RegisterMaintenanceHandler but
 // automatically dials to "endpoint" and closes the connection when "ctx" gets done.
 func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
-	conn, err := grpc.Dial(endpoint, opts...)
+	conn, err := grpc.NewClient(endpoint, opts...)
 	if err != nil {
 		return err
 	}
 	defer func() {
 		if err != nil {
 			if cerr := conn.Close(); cerr != nil {
-				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
 			}
 			return
 		}
 		go func() {
 			<-ctx.Done()
 			if cerr := conn.Close(); cerr != nil {
-				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
 			}
 		}()
 	}()
@@ -3115,26 +3052,28 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn
 // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient".
 // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient"
 // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "MaintenanceClient" to call the correct interceptors.
+// "MaintenanceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
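The pattern hunks drop runtime.AssumeColonVerbOpt(true) everywhere: in the v2 runtime, NewPattern takes only the version, the op-code program, the literal pool, and the verb, with colon-verb handling built in rather than opted into. A small sketch, assuming the grpc-gateway v2 runtime API; the reading of op code 2 as a literal push is background on the pattern machine, not something this diff spells out:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Op codes come in (code, operand) pairs; code 2 pushes a literal from
	// the pool, so this program matches the fixed path
	// /v3/cluster/member/add. The v1-only runtime.AssumeColonVerbOpt
	// option is gone: the verb is simply the last argument (empty here,
	// i.e. no ":verb" suffix on the route).
	pat := runtime.MustPattern(runtime.NewPattern(
		1,                             // pattern version
		[]int{2, 0, 2, 1, 2, 2, 2, 3}, // four literal pushes into the pool
		[]string{"v3", "cluster", "member", "add"},
		"", // verb
	))
	fmt.Println(pat.String()) // /v3/cluster/member/add
}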
 func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error {
 	mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Alarm", runtime.WithHTTPPathPattern("/v3/maintenance/alarm"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Maintenance_Alarm_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Maintenance_Alarm_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
@@ -3142,19 +3081,21 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux
 		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		var err error
+		var annotatedContext context.Context
+		annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Status", runtime.WithHTTPPathPattern("/v3/maintenance/status"))
 		if err != nil {
 			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
+		resp, md, err := request_Maintenance_Status_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
 		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
 			return
 		}
-		forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+		forward_Maintenance_Status_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
}) @@ -3162,19 +3103,21 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Defragment", runtime.WithHTTPPathPattern("/v3/maintenance/defragment")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Maintenance_Defragment_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Defragment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3182,19 +3125,21 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Hash", runtime.WithHTTPPathPattern("/v3/maintenance/hash")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Maintenance_Hash_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Hash_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3202,19 +3147,21 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/HashKV", runtime.WithHTTPPathPattern("/v3/maintenance/hashkv")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Maintenance_HashKV_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Maintenance_HashKV_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_HashKV_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_HashKV_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3222,19 +3169,24 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Snapshot", runtime.WithHTTPPathPattern("/v3/maintenance/snapshot")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Maintenance_Snapshot_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_Maintenance_Snapshot_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { + m1, err := resp.Recv() + return protov1.MessageV2(m1), err + }, mux.GetForwardResponseOptions()...) 
}) @@ -3242,19 +3194,21 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/MoveLeader", runtime.WithHTTPPathPattern("/v3/maintenance/transfer-leadership")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Maintenance_MoveLeader_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Maintenance_MoveLeader_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_MoveLeader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_MoveLeader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3262,19 +3216,21 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Downgrade", runtime.WithHTTPPathPattern("/v3/maintenance/downgrade")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Maintenance_Downgrade_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Maintenance_Downgrade_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Downgrade_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Downgrade_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3282,21 +3238,21 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux } var ( - pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "alarm"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "alarm"}, "")) - pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "status"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "status"}, "")) - pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "defragment"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "defragment"}, "")) - pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hash"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hash"}, "")) - pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hashkv"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hashkv"}, "")) - pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "snapshot"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "snapshot"}, "")) - pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "transfer-leadership"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "transfer-leadership"}, "")) - pattern_Maintenance_Downgrade_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "downgrade"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Maintenance_Downgrade_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "downgrade"}, "")) ) var ( @@ -3320,21 +3276,21 @@ var ( // RegisterAuthHandlerFromEndpoint is same as RegisterAuthHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) 
if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() @@ -3352,26 +3308,28 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "AuthClient" to call the correct interceptors. +// "AuthClient" to call the correct interceptors. This client ignores the HTTP middlewares. func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error { mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/AuthEnable", runtime.WithHTTPPathPattern("/v3/auth/enable")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_AuthEnable_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthEnable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3379,19 +3337,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/AuthDisable", runtime.WithHTTPPathPattern("/v3/auth/disable")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_AuthDisable_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthDisable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3399,19 +3359,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/AuthStatus", runtime.WithHTTPPathPattern("/v3/auth/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_AuthStatus_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_AuthStatus_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3419,19 +3381,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/Authenticate", runtime.WithHTTPPathPattern("/v3/auth/authenticate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_Authenticate_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_Authenticate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3439,19 +3403,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserAdd", runtime.WithHTTPPathPattern("/v3/auth/user/add")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_UserAdd_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3459,19 +3425,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserGet", runtime.WithHTTPPathPattern("/v3/auth/user/get")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_UserGet_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3479,19 +3447,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserList", runtime.WithHTTPPathPattern("/v3/auth/user/list")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_UserList_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3499,19 +3469,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserDelete", runtime.WithHTTPPathPattern("/v3/auth/user/delete")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_UserDelete_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3519,19 +3491,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserChangePassword", runtime.WithHTTPPathPattern("/v3/auth/user/changepw")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_UserChangePassword_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserChangePassword_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3539,19 +3513,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserGrantRole", runtime.WithHTTPPathPattern("/v3/auth/user/grant")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_UserGrantRole_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGrantRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3559,19 +3535,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserRevokeRole", runtime.WithHTTPPathPattern("/v3/auth/user/revoke")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_UserRevokeRole_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserRevokeRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3579,19 +3557,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleAdd", runtime.WithHTTPPathPattern("/v3/auth/role/add")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_RoleAdd_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3599,19 +3579,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGet", runtime.WithHTTPPathPattern("/v3/auth/role/get")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_RoleGet_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3619,19 +3601,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleList", runtime.WithHTTPPathPattern("/v3/auth/role/list")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_RoleList_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3639,19 +3623,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleDelete", runtime.WithHTTPPathPattern("/v3/auth/role/delete")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_RoleDelete_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3659,19 +3645,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGrantPermission", runtime.WithHTTPPathPattern("/v3/auth/role/grant")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_RoleGrantPermission_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGrantPermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -3679,19 +3667,21 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleRevokePermission", runtime.WithHTTPPathPattern("/v3/auth/role/revoke")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Auth_RoleRevokePermission_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleRevokePermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -3699,39 +3689,39 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } var ( - pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "enable"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "enable"}, "")) - pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "disable"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "disable"}, "")) - pattern_Auth_AuthStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "status"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_AuthStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "status"}, "")) - pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "authenticate"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "authenticate"}, "")) - pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "add"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "add"}, "")) - pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "get"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "get"}, "")) - pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "list"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "list"}, "")) - pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "delete"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "delete"}, "")) - pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "changepw"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "changepw"}, "")) - pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "grant"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "grant"}, "")) - pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "revoke"}, "", runtime.AssumeColonVerbOpt(true))) + 
pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "revoke"}, "")) - pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "add"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "add"}, "")) - pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "get"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "get"}, "")) - pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "list"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "list"}, "")) - pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "delete"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "delete"}, "")) - pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "grant"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "grant"}, "")) - pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "revoke"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "revoke"}, "")) ) var ( diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go index b94a7bfd9d967..d39efef7c98a8 100644 --- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go @@ -12,6 +12,7 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/golang/protobuf/proto" membershippb "go.etcd.io/etcd/api/v3/membershippb" + _ "go.etcd.io/etcd/api/v3/versionpb" ) // Reference imports to suppress errors if they are not otherwise used. 
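Note on the Register*HandlerFromEndpoint hunks above: the regenerated gateway code replaces the deprecated grpc.Dial with grpc.NewClient, which constructs the ClientConn lazily (no connection attempt is made until the first RPC), and it promotes close-failure logs from grpclog.Infof to grpclog.Errorf. A minimal sketch of wiring the regenerated gateway by hand, assuming a plaintext etcd gRPC endpoint on localhost:2379 (the endpoint, ports, and main function are illustrative, not part of this patch):

    package main

    import (
        "context"
        "log"
        "net/http"

        "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"

        pb "go.etcd.io/etcd/api/v3/etcdserverpb"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        // grpc.NewClient only validates the target and builds the ClientConn;
        // unlike the old grpc.Dial it performs no I/O here.
        conn, err := grpc.NewClient("localhost:2379",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatalf("build client conn: %v", err)
        }
        defer conn.Close()

        // RegisterMaintenanceHandler forwards the /v3/maintenance/* HTTP routes
        // shown in the pattern variables above to the gRPC endpoint.
        mux := runtime.NewServeMux()
        if err := pb.RegisterMaintenanceHandler(ctx, mux, conn); err != nil {
            log.Fatalf("register maintenance handler: %v", err)
        }
        log.Fatal(http.ListenAndServe(":8080", mux))
    }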
@@ -237,70 +238,74 @@ func init() { func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) } var fileDescriptor_b4c9a9be0cfca103 = []byte{ - // 1003 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xd9, 0x72, 0x1b, 0x45, - 0x14, 0x86, 0x23, 0xc5, 0x71, 0xac, 0x96, 0xed, 0x38, 0x6d, 0x87, 0x34, 0x72, 0x95, 0x70, 0x1c, - 0x12, 0xcc, 0x66, 0x53, 0xca, 0x03, 0x80, 0x90, 0x5c, 0x8e, 0xab, 0x42, 0x70, 0x4d, 0xcc, 0x52, - 0xc5, 0xc5, 0xd0, 0x9a, 0x39, 0x96, 0x06, 0xcf, 0x46, 0x77, 0x4b, 0x31, 0xef, 0x11, 0x28, 0x1e, - 0x83, 0xed, 0x21, 0x72, 0xc1, 0x62, 0xe0, 0x05, 0xc0, 0xdc, 0x70, 0x0f, 0xdc, 0x53, 0xbd, 0xcc, - 0x26, 0xb5, 0x7c, 0xa7, 0xf9, 0xcf, 0x7f, 0xbe, 0x73, 0xba, 0xe7, 0xf4, 0xa8, 0xd1, 0x3a, 0xa3, - 0x27, 0xc2, 0x0d, 0x62, 0x01, 0x2c, 0xa6, 0xe1, 0x6e, 0xca, 0x12, 0x91, 0xe0, 0x65, 0x10, 0x9e, - 0xcf, 0x81, 0x4d, 0x80, 0xa5, 0x83, 0xd6, 0xc6, 0x30, 0x19, 0x26, 0x2a, 0xb0, 0x27, 0x7f, 0x69, - 0x4f, 0x6b, 0xad, 0xf0, 0x18, 0xa5, 0xc1, 0x52, 0xcf, 0xfc, 0xbc, 0x2f, 0x83, 0x7b, 0x34, 0x0d, - 0xf6, 0x22, 0x88, 0x06, 0xc0, 0xf8, 0x28, 0x48, 0xd3, 0x41, 0xe9, 0x41, 0xfb, 0xb6, 0x3f, 0x45, - 0x2b, 0x0e, 0x7c, 0x3e, 0x06, 0x2e, 0x1e, 0x02, 0xf5, 0x81, 0xe1, 0x55, 0x54, 0x3f, 0xec, 0x93, - 0xda, 0x56, 0x6d, 0x67, 0xc1, 0xa9, 0x1f, 0xf6, 0x71, 0x0b, 0x2d, 0x8d, 0xb9, 0x6c, 0x2d, 0x02, - 0x52, 0xdf, 0xaa, 0xed, 0x34, 0x9c, 0xfc, 0x19, 0xdf, 0x45, 0x2b, 0x74, 0x2c, 0x46, 0x2e, 0x83, - 0x49, 0xc0, 0x83, 0x24, 0x26, 0x57, 0x55, 0xda, 0xb2, 0x14, 0x1d, 0xa3, 0x6d, 0x3f, 0xc3, 0x68, - 0xfd, 0xd0, 0xac, 0xce, 0xa1, 0x27, 0xc2, 0x94, 0xc3, 0x0f, 0xd0, 0xe2, 0x48, 0x95, 0x24, 0xfe, - 0x56, 0x6d, 0xa7, 0xd9, 0xd9, 0xdc, 0x2d, 0xaf, 0x79, 0xb7, 0xd2, 0x95, 0x63, 0xac, 0x33, 0xdd, - 0xdd, 0x43, 0xf5, 0x49, 0x47, 0xf5, 0xd5, 0xec, 0xdc, 0xb2, 0x02, 0x9c, 0xfa, 0xa4, 0x83, 0xdf, - 0x42, 0xd7, 0x18, 0x8d, 0x87, 0xa0, 0x1a, 0x6c, 0x76, 0x5a, 0x53, 0x4e, 0x19, 0xca, 0xec, 0xda, - 0x88, 0x5f, 0x43, 0x57, 0xd3, 0xb1, 0x20, 0x0b, 0xca, 0x4f, 0xaa, 0xfe, 0xa3, 0x71, 0xb6, 0x08, - 0x47, 0x9a, 0x70, 0x0f, 0x2d, 0xfb, 0x10, 0x82, 0x00, 0x57, 0x17, 0xb9, 0xa6, 0x92, 0xb6, 0xaa, - 0x49, 0x7d, 0xe5, 0xa8, 0x94, 0x6a, 0xfa, 0x85, 0x26, 0x0b, 0x8a, 0xb3, 0x98, 0x2c, 0xda, 0x0a, - 0x1e, 0x9f, 0xc5, 0x79, 0x41, 0x71, 0x16, 0xe3, 0xb7, 0x11, 0xf2, 0x92, 0x28, 0xa5, 0x9e, 0x90, - 0x9b, 0x7e, 0x5d, 0xa5, 0xbc, 0x54, 0x4d, 0xe9, 0xe5, 0xf1, 0x2c, 0xb3, 0x94, 0x82, 0xdf, 0x41, - 0xcd, 0x10, 0x28, 0x07, 0x77, 0xc8, 0x68, 0x2c, 0xc8, 0x92, 0x8d, 0xf0, 0x48, 0x1a, 0x0e, 0x64, - 0x3c, 0x27, 0x84, 0xb9, 0x24, 0xd7, 0xac, 0x09, 0x0c, 0x26, 0xc9, 0x29, 0x90, 0x86, 0x6d, 0xcd, - 0x0a, 0xe1, 0x28, 0x43, 0xbe, 0xe6, 0xb0, 0xd0, 0xe4, 0x6b, 0xa1, 0x21, 0x65, 0x11, 0x41, 0xb6, - 0xd7, 0xd2, 0x95, 0xa1, 0xfc, 0xb5, 0x28, 0x23, 0x7e, 0x1f, 0xad, 0xe9, 0xb2, 0xde, 0x08, 0xbc, - 0xd3, 0x34, 0x09, 0x62, 0x41, 0x9a, 0x2a, 0xf9, 0x65, 0x4b, 0xe9, 0x5e, 0x6e, 0xca, 0x30, 0x37, - 0xc2, 0xaa, 0x8e, 0xbb, 0xa8, 0xa9, 0x46, 0x18, 0x62, 0x3a, 0x08, 0x81, 0xfc, 0x6d, 0xdd, 0xcc, - 0xee, 0x58, 0x8c, 0xf6, 0x95, 0x21, 0xdf, 0x0a, 0x9a, 0x4b, 0xb8, 0x8f, 0xd4, 0xc0, 0xbb, 0x7e, - 0xc0, 0x15, 0xe3, 0x9f, 0xeb, 0xb6, 0xbd, 0x90, 0x8c, 0xbe, 0x76, 0xe4, 0x7b, 0x41, 0x0b, 0x2d, - 0x6f, 0x84, 0x0b, 0x2a, 0xc6, 0x9c, 0xfc, 0x37, 0xb7, 0x91, 0x27, 0xca, 0x50, 0x69, 0x44, 0x4b, - 0xf8, 0xb1, 0x6e, 0x04, 0x62, 0x11, 0x78, 0x54, 0x00, 0xf9, 0x57, 0x33, 0x5e, 0xad, 0x32, 0xb2, - 0xb3, 0xd8, 0x2d, 0x59, 0x33, 0x5a, 0x25, 0x1f, 0xef, 0x9b, 0xe3, 0x2d, 0xcf, 0xbb, 0x4b, 0x7d, 
- 0x9f, 0xfc, 0xb8, 0x34, 0x6f, 0x65, 0x1f, 0x70, 0x60, 0x5d, 0xdf, 0xaf, 0xac, 0xcc, 0x68, 0xf8, - 0x31, 0x5a, 0x2b, 0x30, 0x7a, 0xe4, 0xc9, 0x4f, 0x9a, 0x74, 0xd7, 0x4e, 0x32, 0x67, 0xc5, 0xc0, - 0x56, 0x69, 0x45, 0xae, 0xb6, 0x35, 0x04, 0x41, 0x7e, 0xbe, 0xb4, 0xad, 0x03, 0x10, 0x33, 0x6d, - 0x1d, 0x80, 0xc0, 0x43, 0xf4, 0x62, 0x81, 0xf1, 0x46, 0xf2, 0x10, 0xba, 0x29, 0xe5, 0xfc, 0x69, - 0xc2, 0x7c, 0xf2, 0x8b, 0x46, 0xbe, 0x6e, 0x47, 0xf6, 0x94, 0xfb, 0xc8, 0x98, 0x33, 0xfa, 0x0b, - 0xd4, 0x1a, 0xc6, 0x1f, 0xa3, 0x8d, 0x52, 0xbf, 0xf2, 0xf4, 0xb8, 0x2c, 0x09, 0x81, 0x9c, 0xeb, - 0x1a, 0xf7, 0xe7, 0xb4, 0xad, 0x4e, 0x5e, 0x52, 0x4c, 0xcb, 0x4d, 0x3a, 0x1d, 0xc1, 0x9f, 0xa0, - 0x5b, 0x05, 0x59, 0x1f, 0x44, 0x8d, 0xfe, 0x55, 0xa3, 0x5f, 0xb1, 0xa3, 0xcd, 0x89, 0x2c, 0xb1, - 0x31, 0x9d, 0x09, 0xe1, 0x87, 0x68, 0xb5, 0x80, 0x87, 0x01, 0x17, 0xe4, 0x37, 0x4d, 0xbd, 0x63, - 0xa7, 0x3e, 0x0a, 0xb8, 0xa8, 0xcc, 0x51, 0x26, 0xe6, 0x24, 0xd9, 0x9a, 0x26, 0xfd, 0x3e, 0x97, - 0x24, 0x4b, 0xcf, 0x90, 0x32, 0x31, 0x7f, 0xf5, 0x8a, 0x24, 0x27, 0xf2, 0x9b, 0xc6, 0xbc, 0x57, - 0x2f, 0x73, 0xa6, 0x27, 0xd2, 0x68, 0xf9, 0x44, 0x2a, 0x8c, 0x99, 0xc8, 0x6f, 0x1b, 0xf3, 0x26, - 0x52, 0x66, 0x59, 0x26, 0xb2, 0x90, 0xab, 0x6d, 0xc9, 0x89, 0xfc, 0xee, 0xd2, 0xb6, 0xa6, 0x27, - 0xd2, 0x68, 0xf8, 0x33, 0xd4, 0x2a, 0x61, 0xd4, 0xa0, 0xa4, 0xc0, 0xa2, 0x80, 0xab, 0xff, 0xd6, - 0xef, 0x35, 0xf3, 0x8d, 0x39, 0x4c, 0x69, 0x3f, 0xca, 0xdd, 0x19, 0xff, 0x36, 0xb5, 0xc7, 0x71, - 0x84, 0x36, 0x8b, 0x5a, 0x66, 0x74, 0x4a, 0xc5, 0x7e, 0xd0, 0xc5, 0xde, 0xb4, 0x17, 0xd3, 0x53, - 0x32, 0x5b, 0x8d, 0xd0, 0x39, 0x06, 0xfc, 0x11, 0x5a, 0xf7, 0xc2, 0x31, 0x17, 0xc0, 0xdc, 0x09, - 0x30, 0x29, 0xb9, 0x1c, 0x04, 0x79, 0x86, 0xcc, 0x11, 0x28, 0x5f, 0x52, 0x76, 0x7b, 0xda, 0xf9, - 0xa1, 0x36, 0x3e, 0x29, 0x76, 0xeb, 0xa6, 0x37, 0x1d, 0xc1, 0x14, 0xdd, 0xce, 0xc0, 0x9a, 0xe1, - 0x52, 0x21, 0x98, 0x82, 0x7f, 0x89, 0xcc, 0xe7, 0xcf, 0x06, 0x7f, 0x4f, 0x69, 0x5d, 0x21, 0x58, - 0x89, 0xbf, 0xe1, 0x59, 0x82, 0xf8, 0x18, 0x61, 0x3f, 0x79, 0x1a, 0x0f, 0x19, 0xf5, 0xc1, 0x0d, - 0xe2, 0x93, 0x44, 0xd1, 0xbf, 0xd2, 0xf4, 0x7b, 0x55, 0x7a, 0x3f, 0x33, 0x1e, 0xc6, 0x27, 0x49, - 0x89, 0xbc, 0xe6, 0x4f, 0x05, 0xb6, 0x6f, 0xa0, 0x95, 0xfd, 0x28, 0x15, 0x5f, 0x38, 0xc0, 0xd3, - 0x24, 0xe6, 0xb0, 0x9d, 0xa2, 0xcd, 0x4b, 0x3e, 0xcd, 0x18, 0xa3, 0x05, 0x75, 0x07, 0xab, 0xa9, - 0x3b, 0x98, 0xfa, 0x2d, 0xef, 0x66, 0xf9, 0x17, 0xcb, 0xdc, 0xcd, 0xb2, 0x67, 0x7c, 0x07, 0x2d, - 0xf3, 0x20, 0x4a, 0x43, 0x70, 0x45, 0x72, 0x0a, 0xfa, 0x6a, 0xd6, 0x70, 0x9a, 0x5a, 0x3b, 0x96, - 0xd2, 0xbb, 0x1b, 0xcf, 0xff, 0x6c, 0x5f, 0x79, 0x7e, 0xd1, 0xae, 0x9d, 0x5f, 0xb4, 0x6b, 0x7f, - 0x5c, 0xb4, 0x6b, 0x5f, 0xff, 0xd5, 0xbe, 0x32, 0x58, 0x54, 0x17, 0xc3, 0x07, 0xff, 0x07, 0x00, - 0x00, 0xff, 0xff, 0x94, 0x6f, 0x64, 0x0a, 0x98, 0x0a, 0x00, 0x00, + // 1072 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x56, 0x4b, 0x73, 0x1b, 0x45, + 0x17, 0x8d, 0x6c, 0xc7, 0xb6, 0x5a, 0xb6, 0xe3, 0xb4, 0x9d, 0x2f, 0xfd, 0xd9, 0x55, 0xc6, 0x71, + 0x48, 0x30, 0x10, 0xe4, 0x20, 0xc3, 0x02, 0x36, 0xa0, 0x48, 0x2e, 0xc7, 0x54, 0x48, 0xb9, 0x26, + 0x81, 0x4a, 0x41, 0x51, 0x43, 0x6b, 0xe6, 0x5a, 0x9a, 0x78, 0x34, 0x33, 0x74, 0xb7, 0x14, 0x67, + 0xcb, 0x92, 0x35, 0x50, 0xfc, 0x0c, 0x9e, 0xff, 0x21, 0x45, 0xf1, 0x08, 0xf0, 0x07, 0xc0, 0x6c, + 0xd8, 0x03, 0x7b, 0xaa, 0x1f, 0xf3, 0x92, 0x5a, 0xde, 0x8d, 0xee, 0x3d, 0xf7, 0x9c, 0xd3, 0xdd, + 0xb7, 0x5b, 0x17, 0xad, 0x30, 0x7a, 0x24, 0xdc, 0x20, 0x12, 0xc0, 0x22, 0x1a, 0xd6, 0x13, 0x16, + 0x8b, 
0x18, 0x2f, 0x80, 0xf0, 0x7c, 0x0e, 0x6c, 0x08, 0x2c, 0xe9, 0xac, 0xad, 0x76, 0xe3, 0x6e, + 0xac, 0x12, 0x3b, 0xf2, 0x4b, 0x63, 0xd6, 0x96, 0x73, 0x8c, 0x89, 0x54, 0x59, 0xe2, 0x99, 0xcf, + 0x4d, 0x99, 0xdc, 0xa1, 0x49, 0xb0, 0x33, 0x04, 0xc6, 0x83, 0x38, 0x4a, 0x3a, 0xe9, 0x97, 0x41, + 0x5c, 0xcf, 0x10, 0x7d, 0xe8, 0x77, 0x80, 0xf1, 0x5e, 0x90, 0x24, 0x9d, 0xc2, 0x0f, 0x8d, 0xdb, + 0x62, 0x68, 0xd1, 0x81, 0x8f, 0x06, 0xc0, 0xc5, 0x6d, 0xa0, 0x3e, 0x30, 0xbc, 0x84, 0xa6, 0x0e, + 0xda, 0xa4, 0xb2, 0x59, 0xd9, 0x9e, 0x71, 0xa6, 0x0e, 0xda, 0x78, 0x0d, 0xcd, 0x0f, 0xb8, 0x34, + 0xdf, 0x07, 0x32, 0xb5, 0x59, 0xd9, 0xae, 0x3a, 0xd9, 0x6f, 0x7c, 0x03, 0x2d, 0xd2, 0x81, 0xe8, + 0xb9, 0x0c, 0x86, 0x81, 0xd4, 0x26, 0xd3, 0xb2, 0xec, 0xd6, 0xdc, 0x27, 0xdf, 0x91, 0xe9, 0xdd, + 0xfa, 0xcb, 0xce, 0x82, 0xcc, 0x3a, 0x26, 0xf9, 0xfa, 0xdc, 0xc7, 0x2a, 0x7c, 0x73, 0xeb, 0x7b, + 0x8c, 0x56, 0x0e, 0xcc, 0x8e, 0x38, 0xf4, 0x48, 0x18, 0x03, 0x78, 0x17, 0xcd, 0xf6, 0x94, 0x09, + 0xe2, 0x6f, 0x56, 0xb6, 0x6b, 0x8d, 0xf5, 0x7a, 0x71, 0x9f, 0xea, 0x25, 0x9f, 0x8e, 0x81, 0x8e, + 0xf9, 0xbd, 0x86, 0xa6, 0x86, 0x0d, 0xe5, 0xb4, 0xd6, 0xb8, 0x64, 0x25, 0x70, 0xa6, 0x86, 0x0d, + 0x7c, 0x13, 0x9d, 0x67, 0x34, 0xea, 0x82, 0xb2, 0x5c, 0x6b, 0xac, 0x8d, 0x20, 0x65, 0x2a, 0x85, + 0x6b, 0x20, 0x7e, 0x01, 0x4d, 0x27, 0x03, 0x41, 0x66, 0x14, 0x9e, 0x94, 0xf1, 0x87, 0x83, 0x74, + 0x11, 0x8e, 0x04, 0xe1, 0x16, 0x5a, 0xf0, 0x21, 0x04, 0x01, 0xae, 0x16, 0x39, 0xaf, 0x8a, 0x36, + 0xcb, 0x45, 0x6d, 0x85, 0x28, 0x49, 0xd5, 0xfc, 0x3c, 0x26, 0x05, 0xc5, 0x49, 0x44, 0x66, 0x6d, + 0x82, 0xf7, 0x4f, 0xa2, 0x4c, 0x50, 0x9c, 0x44, 0xf8, 0x0d, 0x84, 0xbc, 0xb8, 0x9f, 0x50, 0x4f, + 0xc8, 0x63, 0x98, 0x53, 0x25, 0xcf, 0x94, 0x4b, 0x5a, 0x59, 0x3e, 0xad, 0x2c, 0x94, 0xe0, 0x37, + 0x51, 0x2d, 0x04, 0xca, 0xc1, 0xed, 0x32, 0x1a, 0x09, 0x32, 0x6f, 0x63, 0xb8, 0x23, 0x01, 0xfb, + 0x32, 0x9f, 0x31, 0x84, 0x59, 0x48, 0xae, 0x59, 0x33, 0x30, 0x18, 0xc6, 0xc7, 0x40, 0xaa, 0xb6, + 0x35, 0x2b, 0x0a, 0x47, 0x01, 0xb2, 0x35, 0x87, 0x79, 0x4c, 0x1e, 0x0b, 0x0d, 0x29, 0xeb, 0x13, + 0x64, 0x3b, 0x96, 0xa6, 0x4c, 0x65, 0xc7, 0xa2, 0x80, 0xf8, 0x01, 0x5a, 0xd6, 0xb2, 0x5e, 0x0f, + 0xbc, 0xe3, 0x24, 0x0e, 0x22, 0x41, 0x6a, 0xaa, 0xf8, 0x59, 0x8b, 0x74, 0x2b, 0x03, 0x19, 0x9a, + 0xb4, 0x59, 0x5f, 0x71, 0x2e, 0x84, 0x65, 0x00, 0x6e, 0xa2, 0x9a, 0xea, 0x6e, 0x88, 0x68, 0x27, + 0x04, 0xf2, 0x97, 0x75, 0x57, 0x9b, 0x03, 0xd1, 0xdb, 0x53, 0x80, 0x6c, 0x4f, 0x68, 0x16, 0xc2, + 0x6d, 0xa4, 0xae, 0x80, 0xeb, 0x07, 0x5c, 0x71, 0xfc, 0x3d, 0x67, 0xdb, 0x14, 0xc9, 0xd1, 0xd6, + 0x88, 0x6c, 0x53, 0x68, 0x1e, 0xc3, 0x6f, 0x19, 0x23, 0x5c, 0x50, 0x31, 0xe0, 0xe4, 0xdf, 0x89, + 0x46, 0xee, 0x29, 0xc0, 0xc8, 0xca, 0x5e, 0xd5, 0x8e, 0x74, 0x0e, 0xdf, 0xd5, 0x8e, 0x20, 0x12, + 0x81, 0x47, 0x05, 0x90, 0x7f, 0x34, 0xd9, 0xf3, 0x65, 0xb2, 0xf4, 0x76, 0x36, 0x0b, 0xd0, 0xd4, + 0x5a, 0xa9, 0x1e, 0xef, 0x99, 0x27, 0x40, 0xbe, 0x09, 0x2e, 0xf5, 0x7d, 0xf2, 0xc3, 0xfc, 0xa4, + 0x25, 0xbe, 0xc3, 0x81, 0x35, 0x7d, 0xbf, 0xb4, 0x44, 0x13, 0xc3, 0x77, 0xd1, 0x72, 0x4e, 0xa3, + 0x2f, 0x01, 0xf9, 0x51, 0x33, 0x5d, 0xb5, 0x33, 0x99, 0xdb, 0x63, 0xc8, 0x96, 0x68, 0x29, 0x5c, + 0xb6, 0xd5, 0x05, 0x41, 0x7e, 0x3a, 0xd3, 0xd6, 0x3e, 0x88, 0x31, 0x5b, 0xfb, 0x20, 0x70, 0x17, + 0xfd, 0x3f, 0xa7, 0xf1, 0x7a, 0xf2, 0x5a, 0xba, 0x09, 0xe5, 0xfc, 0x51, 0xcc, 0x7c, 0xf2, 0xb3, + 0xa6, 0x7c, 0xd1, 0x4e, 0xd9, 0x52, 0xe8, 0x43, 0x03, 0x4e, 0xd9, 0xff, 0x47, 0xad, 0x69, 0xfc, + 0x00, 0xad, 0x16, 0xfc, 0xca, 0xfb, 0xe4, 0xb2, 0x38, 0x04, 0xf2, 0x54, 0x6b, 0x5c, 0x9f, 0x60, + 0x5b, 0xdd, 0xc5, 0x38, 0x6f, 
0x9b, 0x8b, 0x74, 0x34, 0x83, 0xdf, 0x47, 0x97, 0x72, 0x66, 0x7d, + 0x35, 0x35, 0xf5, 0x2f, 0x9a, 0xfa, 0x39, 0x3b, 0xb5, 0xb9, 0xa3, 0x05, 0x6e, 0x4c, 0xc7, 0x52, + 0xf8, 0x36, 0x5a, 0xca, 0xc9, 0xc3, 0x80, 0x0b, 0xf2, 0xab, 0x66, 0xbd, 0x62, 0x67, 0xbd, 0x13, + 0x70, 0x51, 0xea, 0xa3, 0x34, 0x98, 0x31, 0x49, 0x6b, 0x9a, 0xe9, 0xb7, 0x89, 0x4c, 0x52, 0x7a, + 0x8c, 0x29, 0x0d, 0x66, 0x47, 0xaf, 0x98, 0x64, 0x47, 0x7e, 0x59, 0x9d, 0x74, 0xf4, 0xb2, 0x66, + 0xb4, 0x23, 0x4d, 0x2c, 0xeb, 0x48, 0x45, 0x63, 0x3a, 0xf2, 0xab, 0xea, 0xa4, 0x8e, 0x94, 0x55, + 0x96, 0x8e, 0xcc, 0xc3, 0x65, 0x5b, 0xb2, 0x23, 0xbf, 0x3e, 0xd3, 0xd6, 0x68, 0x47, 0x9a, 0x18, + 0x7e, 0x88, 0xd6, 0x0a, 0x34, 0xaa, 0x51, 0x12, 0x60, 0xfd, 0x80, 0xab, 0xff, 0xdf, 0x6f, 0x34, + 0xe7, 0x8d, 0x09, 0x9c, 0x12, 0x7e, 0x98, 0xa1, 0x53, 0xfe, 0xcb, 0xd4, 0x9e, 0xc7, 0x7d, 0xb4, + 0x9e, 0x6b, 0x99, 0xd6, 0x29, 0x88, 0x7d, 0xab, 0xc5, 0x5e, 0xb2, 0x8b, 0xe9, 0x2e, 0x19, 0x57, + 0x23, 0x74, 0x02, 0x00, 0x7f, 0x88, 0x56, 0xbc, 0x70, 0xc0, 0x05, 0x30, 0xd7, 0xcc, 0x32, 0x2e, + 0x07, 0x41, 0x3e, 0x45, 0xe6, 0x0a, 0x14, 0x07, 0x99, 0x7a, 0x4b, 0x23, 0xdf, 0xd5, 0xc0, 0x7b, + 0x20, 0xc6, 0x5e, 0xbd, 0x8b, 0xde, 0x28, 0x04, 0x3f, 0x44, 0x97, 0x53, 0x05, 0x4d, 0xe6, 0x52, + 0x21, 0x98, 0x52, 0xf9, 0x0c, 0x99, 0x77, 0xd0, 0xa6, 0xf2, 0xb6, 0x8a, 0x35, 0x85, 0x60, 0x36, + 0xa1, 0x55, 0xcf, 0x82, 0xc2, 0x1f, 0x20, 0xec, 0xc7, 0x8f, 0xa2, 0x2e, 0xa3, 0x3e, 0xb8, 0x41, + 0x74, 0x14, 0x2b, 0x99, 0xcf, 0xb5, 0xcc, 0xb5, 0xb2, 0x4c, 0x3b, 0x05, 0x1e, 0x44, 0x47, 0xb1, + 0x4d, 0x62, 0xd9, 0x1f, 0x41, 0xe4, 0xc3, 0xd4, 0x05, 0xb4, 0xb8, 0xd7, 0x4f, 0xc4, 0x63, 0x07, + 0x78, 0x12, 0x47, 0x1c, 0xb6, 0x1e, 0xa3, 0xf5, 0x33, 0x9e, 0x6f, 0x8c, 0xd1, 0x8c, 0x9a, 0xe5, + 0x2a, 0x6a, 0x96, 0x53, 0xdf, 0x72, 0xc6, 0xcb, 0x5e, 0x35, 0x33, 0xe3, 0xa5, 0xbf, 0xf1, 0x15, + 0xb4, 0xc0, 0x83, 0x7e, 0x12, 0x82, 0x2b, 0xe2, 0x63, 0xd0, 0x23, 0x5e, 0xd5, 0xa9, 0xe9, 0xd8, + 0x7d, 0x19, 0xca, 0xbc, 0xdc, 0x7a, 0xed, 0xc9, 0x1f, 0x1b, 0xe7, 0x9e, 0x9c, 0x6e, 0x54, 0x9e, + 0x9e, 0x6e, 0x54, 0x7e, 0x3f, 0xdd, 0xa8, 0x7c, 0xf1, 0xe7, 0xc6, 0xb9, 0xf7, 0xae, 0x76, 0x63, + 0xd5, 0x22, 0xf5, 0x20, 0xde, 0xc9, 0xe7, 0xd6, 0xdd, 0x9d, 0x62, 0xdb, 0x74, 0x66, 0xd5, 0x38, + 0xba, 0xfb, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x06, 0x59, 0x70, 0x10, 0x30, 0x0b, 0x00, 0x00, } func (m *RequestHeader) Marshal() (dAtA []byte, err error) { diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto index 68926e59f6c95..91f7705b739fb 100644 --- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto @@ -4,24 +4,31 @@ package etcdserverpb; import "gogoproto/gogo.proto"; import "etcdserver.proto"; import "rpc.proto"; +import "etcd/api/versionpb/version.proto"; import "etcd/api/membershippb/membership.proto"; +option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb"; + option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; option (gogoproto.unmarshaler_all) = true; option (gogoproto.goproto_getters_all) = false; message RequestHeader { + option (versionpb.etcd_version_msg) = "3.0"; + uint64 ID = 1; // username is a username that is associated with an auth token of gRPC connection string username = 2; // auth_revision is a revision number of auth.authStore. 
It is not related to mvcc - uint64 auth_revision = 3; + uint64 auth_revision = 3 [(versionpb.etcd_version_field) = "3.1"]; } // An InternalRaftRequest is the union of all requests which can be // sent via raft. message InternalRaftRequest { + option (versionpb.etcd_version_msg) = "3.0"; + RequestHeader header = 100; uint64 ID = 1; @@ -38,11 +45,11 @@ message InternalRaftRequest { AlarmRequest alarm = 10; - LeaseCheckpointRequest lease_checkpoint = 11; + LeaseCheckpointRequest lease_checkpoint = 11 [(versionpb.etcd_version_field) = "3.4"]; AuthEnableRequest auth_enable = 1000; AuthDisableRequest auth_disable = 1011; - AuthStatusRequest auth_status = 1013; + AuthStatusRequest auth_status = 1013 [(versionpb.etcd_version_field) = "3.5"]; InternalAuthenticateRequest authenticate = 1012; @@ -61,9 +68,9 @@ message InternalRaftRequest { AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203; AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204; - membershippb.ClusterVersionSetRequest cluster_version_set = 1300; - membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301; - membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302; + membershippb.ClusterVersionSetRequest cluster_version_set = 1300 [(versionpb.etcd_version_field) = "3.5"]; + membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301 [(versionpb.etcd_version_field) = "3.5"]; + membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302 [(versionpb.etcd_version_field) = "3.5"]; } message EmptyResponse { @@ -73,6 +80,7 @@ message EmptyResponse { // InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. // For avoiding misusage the field, we have an internal version of AuthenticateRequest. message InternalAuthenticateRequest { + option (versionpb.etcd_version_msg) = "3.0"; string name = 1; string password = 2; diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go index 31e121ee0a63e..a9431d52542be 100644 --- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go @@ -72,13 +72,13 @@ func (as *InternalRaftStringer) String() string { return as.Request.String() } -// txnRequestStringer implements a custom proto String to replace value bytes fields with value size -// fields in any nested txn and put operations. +// txnRequestStringer implements fmt.Stringer, a custom proto String to replace value bytes +// fields with value size fields in any nested txn and put operations. type txnRequestStringer struct { Request *TxnRequest } -func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer { +func NewLoggableTxnRequest(request *TxnRequest) fmt.Stringer { return &txnRequestStringer{request} } @@ -155,8 +155,8 @@ func (m *loggableValueCompare) Reset() { *m = loggableValueCompare{} } func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) } func (*loggableValueCompare) ProtoMessage() {} -// loggablePutRequest implements a custom proto String to replace value bytes field with a value -// size field. +// loggablePutRequest implements proto.Message, a custom proto String to replace value bytes +// field with a value size field. // To preserve proto encoding of the key bytes, a faked out proto type is used here. 
type loggablePutRequest struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3"` @@ -167,7 +167,7 @@ type loggablePutRequest struct { IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"` } -func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest { +func NewLoggablePutRequest(request *PutRequest) proto.Message { return &loggablePutRequest{ request.Key, int64(len(request.Value)), diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go index 8120907d91dee..87aec5c015a27 100644 --- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go @@ -12,8 +12,10 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/golang/protobuf/proto" + _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options" authpb "go.etcd.io/etcd/api/v3/authpb" mvccpb "go.etcd.io/etcd/api/v3/mvccpb" + _ "go.etcd.io/etcd/api/v3/versionpb" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -274,9 +276,10 @@ type ResponseHeader struct { ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // member_id is the ID of the member which sent the response. MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` - // revision is the key-value store revision when the request was applied. + // revision is the key-value store revision when the request was applied; it is + // unset (0) for calls that do not interact with the key-value store. // For watch progress responses, the header.revision indicates progress. All future events - // recieved in this stream are guaranteed to have a higher revision number than the + // received in this stream are guaranteed to have a higher revision number than the // header.revision number. Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` // raft_term is the raft term when the request was applied. @@ -1630,7 +1633,9 @@ type HashKVResponse struct { // hash is the hash value computed from the responding member's MVCC keys up to a given revision. Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` // compact_revision is the compacted revision of key-value store when hash begins. - CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + // hash_revision is the revision up to which the hash is calculated. + HashRevision int64 `protobuf:"varint,4,opt,name=hash_revision,json=hashRevision,proto3" json:"hash_revision,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1690,6 +1695,13 @@ func (m *HashKVResponse) GetCompactRevision() int64 { return 0 } +func (m *HashKVResponse) GetHashRevision() int64 { + if m != nil { + return m.HashRevision + } + return 0 +} + type HashResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` // hash is the hash value computed from the responding member's KV's backend.
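The accessors added above follow the usual gogo/protobuf nil-safe getter convention: each getter checks the receiver for nil before dereferencing, so callers can read the new hash_revision field without guarding against a nil response. A minimal usage sketch; the response value here is hypothetical and not part of this diff, and the sibling getters (GetHash, GetCompactRevision) are assumed to be generated alongside the one shown:

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)

func main() {
	// Generated getters are nil-safe: calling a pointer-receiver method
	// on a nil *HashKVResponse is legal in Go, and the m != nil check
	// inside the getter returns the zero value instead of panicking.
	var resp *pb.HashKVResponse
	fmt.Println(resp.GetHashRevision()) // 0, no panic

	resp = &pb.HashKVResponse{Hash: 42, CompactRevision: 7, HashRevision: 9}
	fmt.Println(resp.GetHash(), resp.GetCompactRevision(), resp.GetHashRevision()) // 42 7 9
}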
@@ -1792,7 +1804,11 @@ type SnapshotResponse struct { // remaining_bytes is the number of blob bytes to be sent after this message RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` // blob contains the next chunk of the snapshot in the snapshot stream. - Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` + Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` + // local version of the server that created the snapshot. + // In a cluster whose members run different binary versions, each member can return a different result. + // It informs which etcd server version should be used when restoring the snapshot. + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1852,6 +1868,13 @@ func (m *SnapshotResponse) GetBlob() []byte { return nil } +func (m *SnapshotResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + type WatchRequest struct { // request_union is a request to either create a new watcher or cancel an existing watcher. // @@ -2176,7 +2199,8 @@ type WatchResponse struct { // the created watcher from the same stream. // All events sent to the created watcher will attach with the same watch_id. Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` - // canceled is set to true if the response is for a cancel watch request. + // canceled is set to true if the response is for a cancel watch request + // or if the start_revision has already been compacted. // No further events will be sent to the canceled watcher. Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"` // compact_revision is set to the minimum index if a watcher tries to watch @@ -4223,7 +4247,11 @@ type StatusResponse struct { // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"` // isLearner indicates if the member is raft learner. - IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"` + IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"` + // storageVersion is the version of the db file. It might get updated with a delay relative to the target cluster version.
+ StorageVersion string `protobuf:"bytes,11,opt,name=storageVersion,proto3" json:"storageVersion,omitempty"` + // dbSizeQuota is the configured etcd storage quota in bytes (the value passed to etcd instance by flag --quota-backend-bytes) + DbSizeQuota int64 `protobuf:"varint,12,opt,name=dbSizeQuota,proto3" json:"dbSizeQuota,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -4332,6 +4360,20 @@ func (m *StatusResponse) GetIsLearner() bool { return false } +func (m *StatusResponse) GetStorageVersion() string { + if m != nil { + return m.StorageVersion + } + return "" +} + +func (m *StatusResponse) GetDbSizeQuota() int64 { + if m != nil { + return m.DbSizeQuota + } + return 0 +} + type AuthEnableRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -6154,264 +6196,289 @@ func init() { func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 4110 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0xdd, 0x73, 0x1b, 0xc9, - 0x71, 0xe7, 0x02, 0x24, 0x01, 0x34, 0x3e, 0x08, 0x0e, 0x29, 0x0a, 0xc2, 0x49, 0x14, 0x6f, 0x74, - 0xd2, 0xf1, 0xa4, 0x3b, 0xe2, 0x4c, 0xdb, 0xb9, 0x2a, 0x25, 0x71, 0x0c, 0x91, 0x38, 0x89, 0x47, - 0x8a, 0xe4, 0x2d, 0x21, 0xdd, 0x47, 0xb9, 0xc2, 0x5a, 0x02, 0x23, 0x72, 0x43, 0x60, 0x17, 0xde, - 0x5d, 0x40, 0xe4, 0xe5, 0xc3, 0x2e, 0x97, 0xe3, 0x4a, 0x5e, 0xed, 0xaa, 0x54, 0xf2, 0x90, 0xbc, - 0xa4, 0x52, 0x2e, 0x3f, 0xf8, 0x39, 0xff, 0x42, 0x9e, 0xf2, 0x51, 0xf9, 0x07, 0x52, 0x17, 0xbf, - 0x24, 0x7f, 0x85, 0x6b, 0xbe, 0x76, 0x67, 0xf6, 0x83, 0x92, 0x8d, 0xbb, 0x7b, 0x11, 0x31, 0x3d, - 0x3d, 0xfd, 0xeb, 0xe9, 0x99, 0xe9, 0xee, 0xe9, 0x59, 0x41, 0xc9, 0x1b, 0xf5, 0x36, 0x46, 0x9e, - 0x1b, 0xb8, 0xa8, 0x42, 0x82, 0x5e, 0xdf, 0x27, 0xde, 0x84, 0x78, 0xa3, 0x93, 0xe6, 0xf2, 0xa9, - 0x7b, 0xea, 0xb2, 0x8e, 0x16, 0xfd, 0xc5, 0x79, 0x9a, 0x0d, 0xca, 0xd3, 0xb2, 0x46, 0x76, 0x6b, - 0x38, 0xe9, 0xf5, 0x46, 0x27, 0xad, 0xf3, 0x89, 0xe8, 0x69, 0x86, 0x3d, 0xd6, 0x38, 0x38, 0x1b, - 0x9d, 0xb0, 0x3f, 0xa2, 0xef, 0xe6, 0xa9, 0xeb, 0x9e, 0x0e, 0x08, 0xef, 0x75, 0x1c, 0x37, 0xb0, - 0x02, 0xdb, 0x75, 0x7c, 0xde, 0x8b, 0xff, 0xda, 0x80, 0x9a, 0x49, 0xfc, 0x91, 0xeb, 0xf8, 0xe4, - 0x09, 0xb1, 0xfa, 0xc4, 0x43, 0xb7, 0x00, 0x7a, 0x83, 0xb1, 0x1f, 0x10, 0xef, 0xd8, 0xee, 0x37, - 0x8c, 0x35, 0x63, 0x7d, 0xd6, 0x2c, 0x09, 0xca, 0x4e, 0x1f, 0xbd, 0x01, 0xa5, 0x21, 0x19, 0x9e, - 0xf0, 0xde, 0x1c, 0xeb, 0x2d, 0x72, 0xc2, 0x4e, 0x1f, 0x35, 0xa1, 0xe8, 0x91, 0x89, 0xed, 0xdb, - 0xae, 0xd3, 0xc8, 0xaf, 0x19, 0xeb, 0x79, 0x33, 0x6c, 0xd3, 0x81, 0x9e, 0xf5, 0x22, 0x38, 0x0e, - 0x88, 0x37, 0x6c, 0xcc, 0xf2, 0x81, 0x94, 0xd0, 0x25, 0xde, 0x10, 0xff, 0x74, 0x0e, 0x2a, 0xa6, - 0xe5, 0x9c, 0x12, 0x93, 0xfc, 0x70, 0x4c, 0xfc, 0x00, 0xd5, 0x21, 0x7f, 0x4e, 0x2e, 0x19, 0x7c, - 0xc5, 0xa4, 0x3f, 0xf9, 0x78, 0xe7, 0x94, 0x1c, 0x13, 0x87, 0x03, 0x57, 0xe8, 0x78, 0xe7, 0x94, - 0x74, 0x9c, 0x3e, 0x5a, 0x86, 0xb9, 0x81, 0x3d, 0xb4, 0x03, 0x81, 0xca, 0x1b, 0x9a, 0x3a, 0xb3, - 0x31, 0x75, 0xb6, 0x00, 0x7c, 0xd7, 0x0b, 0x8e, 0x5d, 0xaf, 0x4f, 0xbc, 0xc6, 0xdc, 0x9a, 0xb1, - 0x5e, 0xdb, 0x7c, 0x6b, 0x43, 0x5d, 0x86, 0x0d, 0x55, 0xa1, 0x8d, 0x23, 0xd7, 0x0b, 0x0e, 0x28, - 0xaf, 0x59, 0xf2, 0xe5, 0x4f, 0xf4, 0x21, 0x94, 0x99, 0x90, 0xc0, 0xf2, 0x4e, 0x49, 0xd0, 0x98, - 0x67, 0x52, 0xee, 0xbe, 0x42, 0x4a, 0x97, 0x31, 0x9b, 0x0c, 0x9e, 0xff, 0x46, 0x18, 0x2a, 0x3e, - 0xf1, 0x6c, 0x6b, 
0x60, 0x7f, 0x61, 0x9d, 0x0c, 0x48, 0xa3, 0xb0, 0x66, 0xac, 0x17, 0x4d, 0x8d, - 0x46, 0xe7, 0x7f, 0x4e, 0x2e, 0xfd, 0x63, 0xd7, 0x19, 0x5c, 0x36, 0x8a, 0x8c, 0xa1, 0x48, 0x09, - 0x07, 0xce, 0xe0, 0x92, 0x2d, 0x9a, 0x3b, 0x76, 0x02, 0xde, 0x5b, 0x62, 0xbd, 0x25, 0x46, 0x61, - 0xdd, 0xeb, 0x50, 0x1f, 0xda, 0xce, 0xf1, 0xd0, 0xed, 0x1f, 0x87, 0x06, 0x01, 0x66, 0x90, 0xda, - 0xd0, 0x76, 0x9e, 0xba, 0x7d, 0x53, 0x9a, 0x85, 0x72, 0x5a, 0x17, 0x3a, 0x67, 0x59, 0x70, 0x5a, - 0x17, 0x2a, 0xe7, 0x06, 0x2c, 0x51, 0x99, 0x3d, 0x8f, 0x58, 0x01, 0x89, 0x98, 0x2b, 0x8c, 0x79, - 0x71, 0x68, 0x3b, 0x5b, 0xac, 0x47, 0xe3, 0xb7, 0x2e, 0x12, 0xfc, 0x55, 0xc1, 0x6f, 0x5d, 0xe8, - 0xfc, 0x78, 0x03, 0x4a, 0xa1, 0xcd, 0x51, 0x11, 0x66, 0xf7, 0x0f, 0xf6, 0x3b, 0xf5, 0x19, 0x04, - 0x30, 0xdf, 0x3e, 0xda, 0xea, 0xec, 0x6f, 0xd7, 0x0d, 0x54, 0x86, 0xc2, 0x76, 0x87, 0x37, 0x72, - 0xf8, 0x11, 0x40, 0x64, 0x5d, 0x54, 0x80, 0xfc, 0x6e, 0xe7, 0xb3, 0xfa, 0x0c, 0xe5, 0x79, 0xde, - 0x31, 0x8f, 0x76, 0x0e, 0xf6, 0xeb, 0x06, 0x1d, 0xbc, 0x65, 0x76, 0xda, 0xdd, 0x4e, 0x3d, 0x47, - 0x39, 0x9e, 0x1e, 0x6c, 0xd7, 0xf3, 0xa8, 0x04, 0x73, 0xcf, 0xdb, 0x7b, 0xcf, 0x3a, 0xf5, 0x59, - 0xfc, 0x0b, 0x03, 0xaa, 0x62, 0xbd, 0xf8, 0x99, 0x40, 0xdf, 0x81, 0xf9, 0x33, 0x76, 0x2e, 0xd8, - 0x56, 0x2c, 0x6f, 0xde, 0x8c, 0x2d, 0xae, 0x76, 0x76, 0x4c, 0xc1, 0x8b, 0x30, 0xe4, 0xcf, 0x27, - 0x7e, 0x23, 0xb7, 0x96, 0x5f, 0x2f, 0x6f, 0xd6, 0x37, 0xf8, 0x79, 0xdd, 0xd8, 0x25, 0x97, 0xcf, - 0xad, 0xc1, 0x98, 0x98, 0xb4, 0x13, 0x21, 0x98, 0x1d, 0xba, 0x1e, 0x61, 0x3b, 0xb6, 0x68, 0xb2, - 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0xaf, 0x0c, 0x80, 0xc3, 0x71, 0x90, - 0x7d, 0x34, 0x96, 0x61, 0x6e, 0x42, 0x05, 0x8b, 0x63, 0xc1, 0x1b, 0xec, 0x4c, 0x10, 0xcb, 0x27, - 0xe1, 0x99, 0xa0, 0x0d, 0x74, 0x1d, 0x0a, 0x23, 0x8f, 0x4c, 0x8e, 0xcf, 0x27, 0x0c, 0xa4, 0x68, - 0xce, 0xd3, 0xe6, 0xee, 0x04, 0xbd, 0x09, 0x15, 0xfb, 0xd4, 0x71, 0x3d, 0x72, 0xcc, 0x65, 0xcd, - 0xb1, 0xde, 0x32, 0xa7, 0x31, 0xbd, 0x15, 0x16, 0x2e, 0x78, 0x5e, 0x65, 0xd9, 0xa3, 0x24, 0xec, - 0x40, 0x99, 0xa9, 0x3a, 0x95, 0xf9, 0xde, 0x89, 0x74, 0xcc, 0xb1, 0x61, 0x49, 0x13, 0x0a, 0xad, - 0xf1, 0x0f, 0x00, 0x6d, 0x93, 0x01, 0x09, 0xc8, 0x34, 0xde, 0x43, 0xb1, 0x49, 0x5e, 0xb5, 0x09, - 0xfe, 0xb9, 0x01, 0x4b, 0x9a, 0xf8, 0xa9, 0xa6, 0xd5, 0x80, 0x42, 0x9f, 0x09, 0xe3, 0x1a, 0xe4, - 0x4d, 0xd9, 0x44, 0x0f, 0xa0, 0x28, 0x14, 0xf0, 0x1b, 0xf9, 0x8c, 0x4d, 0x53, 0xe0, 0x3a, 0xf9, - 0xf8, 0x57, 0x39, 0x28, 0x89, 0x89, 0x1e, 0x8c, 0x50, 0x1b, 0xaa, 0x1e, 0x6f, 0x1c, 0xb3, 0xf9, - 0x08, 0x8d, 0x9a, 0xd9, 0x4e, 0xe8, 0xc9, 0x8c, 0x59, 0x11, 0x43, 0x18, 0x19, 0xfd, 0x21, 0x94, - 0xa5, 0x88, 0xd1, 0x38, 0x10, 0x26, 0x6f, 0xe8, 0x02, 0xa2, 0xfd, 0xf7, 0x64, 0xc6, 0x04, 0xc1, - 0x7e, 0x38, 0x0e, 0x50, 0x17, 0x96, 0xe5, 0x60, 0x3e, 0x1b, 0xa1, 0x46, 0x9e, 0x49, 0x59, 0xd3, - 0xa5, 0x24, 0x97, 0xea, 0xc9, 0x8c, 0x89, 0xc4, 0x78, 0xa5, 0x53, 0x55, 0x29, 0xb8, 0xe0, 0xce, - 0x3b, 0xa1, 0x52, 0xf7, 0xc2, 0x49, 0xaa, 0xd4, 0xbd, 0x70, 0x1e, 0x95, 0xa0, 0x20, 0x5a, 0xf8, - 0x5f, 0x73, 0x00, 0x72, 0x35, 0x0e, 0x46, 0x68, 0x1b, 0x6a, 0x9e, 0x68, 0x69, 0xd6, 0x7a, 0x23, - 0xd5, 0x5a, 0x62, 0x11, 0x67, 0xcc, 0xaa, 0x1c, 0xc4, 0x95, 0xfb, 0x1e, 0x54, 0x42, 0x29, 0x91, - 0xc1, 0x6e, 0xa4, 0x18, 0x2c, 0x94, 0x50, 0x96, 0x03, 0xa8, 0xc9, 0x3e, 0x81, 0x6b, 0xe1, 0xf8, - 0x14, 0x9b, 0xbd, 0x79, 0x85, 0xcd, 0x42, 0x81, 0x4b, 0x52, 0x82, 0x6a, 0x35, 0x55, 0xb1, 0xc8, - 0x6c, 0x37, 0x52, 0xcc, 0x96, 0x54, 0x8c, 0x1a, 0x0e, 0x68, 0xbc, 0xe4, 0x4d, 0xfc, 0x7f, 0x79, - 0x28, 0x6c, 0xb9, 0xc3, 0x91, 0xe5, 0xd1, 
0xd5, 0x98, 0xf7, 0x88, 0x3f, 0x1e, 0x04, 0xcc, 0x5c, - 0xb5, 0xcd, 0x3b, 0xba, 0x44, 0xc1, 0x26, 0xff, 0x9a, 0x8c, 0xd5, 0x14, 0x43, 0xe8, 0x60, 0x11, - 0x1e, 0x73, 0xaf, 0x31, 0x58, 0x04, 0x47, 0x31, 0x44, 0x1e, 0xe4, 0x7c, 0x74, 0x90, 0x9b, 0x50, - 0x98, 0x10, 0x2f, 0x0a, 0xe9, 0x4f, 0x66, 0x4c, 0x49, 0x40, 0xef, 0xc0, 0x42, 0x3c, 0xbc, 0xcc, - 0x09, 0x9e, 0x5a, 0x4f, 0x8f, 0x46, 0x77, 0xa0, 0xa2, 0xc5, 0xb8, 0x79, 0xc1, 0x57, 0x1e, 0x2a, - 0x21, 0x6e, 0x45, 0xfa, 0x55, 0x1a, 0x8f, 0x2b, 0x4f, 0x66, 0xa4, 0x67, 0x5d, 0x91, 0x9e, 0xb5, - 0x28, 0x46, 0x09, 0xdf, 0xaa, 0x39, 0x99, 0xef, 0xeb, 0x4e, 0x06, 0x7f, 0x1f, 0xaa, 0x9a, 0x81, - 0x68, 0xdc, 0xe9, 0x7c, 0xfc, 0xac, 0xbd, 0xc7, 0x83, 0xd4, 0x63, 0x16, 0x97, 0xcc, 0xba, 0x41, - 0x63, 0xdd, 0x5e, 0xe7, 0xe8, 0xa8, 0x9e, 0x43, 0x55, 0x28, 0xed, 0x1f, 0x74, 0x8f, 0x39, 0x57, - 0x1e, 0x3f, 0x0e, 0x25, 0x88, 0x20, 0xa7, 0xc4, 0xb6, 0x19, 0x25, 0xb6, 0x19, 0x32, 0xb6, 0xe5, - 0xa2, 0xd8, 0xc6, 0xc2, 0xdc, 0x5e, 0xa7, 0x7d, 0xd4, 0xa9, 0xcf, 0x3e, 0xaa, 0x41, 0x85, 0xdb, - 0xf7, 0x78, 0xec, 0xd0, 0x50, 0xfb, 0xcf, 0x06, 0x40, 0x74, 0x9a, 0x50, 0x0b, 0x0a, 0x3d, 0x8e, - 0xd3, 0x30, 0x98, 0x33, 0xba, 0x96, 0xba, 0x64, 0xa6, 0xe4, 0x42, 0xdf, 0x82, 0x82, 0x3f, 0xee, - 0xf5, 0x88, 0x2f, 0x43, 0xde, 0xf5, 0xb8, 0x3f, 0x14, 0xde, 0xca, 0x94, 0x7c, 0x74, 0xc8, 0x0b, - 0xcb, 0x1e, 0x8c, 0x59, 0x00, 0xbc, 0x7a, 0x88, 0xe0, 0xc3, 0xff, 0x60, 0x40, 0x59, 0xd9, 0xbc, - 0xbf, 0xa7, 0x13, 0xbe, 0x09, 0x25, 0xa6, 0x03, 0xe9, 0x0b, 0x37, 0x5c, 0x34, 0x23, 0x02, 0xfa, - 0x03, 0x28, 0xc9, 0x13, 0x20, 0x3d, 0x71, 0x23, 0x5d, 0xec, 0xc1, 0xc8, 0x8c, 0x58, 0xf1, 0x2e, - 0x2c, 0x32, 0xab, 0xf4, 0x68, 0x72, 0x2d, 0xed, 0xa8, 0xa6, 0x9f, 0x46, 0x2c, 0xfd, 0x6c, 0x42, - 0x71, 0x74, 0x76, 0xe9, 0xdb, 0x3d, 0x6b, 0x20, 0xb4, 0x08, 0xdb, 0xf8, 0x23, 0x40, 0xaa, 0xb0, - 0x69, 0xa6, 0x8b, 0xab, 0x50, 0x7e, 0x62, 0xf9, 0x67, 0x42, 0x25, 0xfc, 0x00, 0xaa, 0xb4, 0xb9, - 0xfb, 0xfc, 0x35, 0x74, 0x64, 0x97, 0x03, 0xc9, 0x3d, 0x95, 0xcd, 0x11, 0xcc, 0x9e, 0x59, 0xfe, - 0x19, 0x9b, 0x68, 0xd5, 0x64, 0xbf, 0xd1, 0x3b, 0x50, 0xef, 0xf1, 0x49, 0x1e, 0xc7, 0xae, 0x0c, - 0x0b, 0x82, 0x1e, 0x66, 0x82, 0x9f, 0x42, 0x85, 0xcf, 0xe1, 0xab, 0x56, 0x02, 0x2f, 0xc2, 0xc2, - 0x91, 0x63, 0x8d, 0xfc, 0x33, 0x57, 0x46, 0x37, 0x3a, 0xe9, 0x7a, 0x44, 0x9b, 0x0a, 0xf1, 0x6d, - 0x58, 0xf0, 0xc8, 0xd0, 0xb2, 0x1d, 0xdb, 0x39, 0x3d, 0x3e, 0xb9, 0x0c, 0x88, 0x2f, 0x2e, 0x4c, - 0xb5, 0x90, 0xfc, 0x88, 0x52, 0xa9, 0x6a, 0x27, 0x03, 0xf7, 0x44, 0xb8, 0x39, 0xf6, 0x1b, 0xff, - 0x2c, 0x07, 0x95, 0x4f, 0xac, 0xa0, 0x27, 0x97, 0x0e, 0xed, 0x40, 0x2d, 0x74, 0x6e, 0x8c, 0x22, - 0x74, 0x89, 0x85, 0x58, 0x36, 0x46, 0xa6, 0xd2, 0x32, 0x3a, 0x56, 0x7b, 0x2a, 0x81, 0x89, 0xb2, - 0x9c, 0x1e, 0x19, 0x84, 0xa2, 0x72, 0xd9, 0xa2, 0x18, 0xa3, 0x2a, 0x4a, 0x25, 0xa0, 0x03, 0xa8, - 0x8f, 0x3c, 0xf7, 0xd4, 0x23, 0xbe, 0x1f, 0x0a, 0xe3, 0x61, 0x0c, 0xa7, 0x08, 0x3b, 0x14, 0xac, - 0x91, 0xb8, 0x85, 0x91, 0x4e, 0x7a, 0xb4, 0x10, 0xe5, 0x33, 0xdc, 0x39, 0xfd, 0x57, 0x0e, 0x50, - 0x72, 0x52, 0xbf, 0x6b, 0x8a, 0x77, 0x17, 0x6a, 0x7e, 0x60, 0x79, 0x89, 0xcd, 0x56, 0x65, 0xd4, - 0xd0, 0xe3, 0xbf, 0x0d, 0xa1, 0x42, 0xc7, 0x8e, 0x1b, 0xd8, 0x2f, 0x2e, 0x45, 0x96, 0x5c, 0x93, - 0xe4, 0x7d, 0x46, 0x45, 0x1d, 0x28, 0xbc, 0xb0, 0x07, 0x01, 0xf1, 0xfc, 0xc6, 0xdc, 0x5a, 0x7e, - 0xbd, 0xb6, 0xf9, 0xe0, 0x55, 0xcb, 0xb0, 0xf1, 0x21, 0xe3, 0xef, 0x5e, 0x8e, 0x88, 0x29, 0xc7, - 0xaa, 0x99, 0xe7, 0xbc, 0x96, 0x8d, 0xdf, 0x80, 0xe2, 0x4b, 0x2a, 0x82, 0xde, 0xb2, 0x0b, 0x3c, - 0x59, 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0xe1, 0x59, 0xa7, 0x43, 0xe2, 
0x04, 0xf2, 0x1e, 0x28, 0xdb, - 0xf8, 0x2e, 0x40, 0x04, 0x43, 0x5d, 0xfe, 0xfe, 0xc1, 0xe1, 0xb3, 0x6e, 0x7d, 0x06, 0x55, 0xa0, - 0xb8, 0x7f, 0xb0, 0xdd, 0xd9, 0xeb, 0xd0, 0xf8, 0x80, 0x5b, 0xd2, 0xa4, 0xda, 0x5a, 0xaa, 0x98, - 0x86, 0x86, 0x89, 0x57, 0x60, 0x39, 0x6d, 0x01, 0x69, 0x2e, 0x5a, 0x15, 0xbb, 0x74, 0xaa, 0xa3, - 0xa2, 0x42, 0xe7, 0xf4, 0xe9, 0x36, 0xa0, 0xc0, 0x77, 0x6f, 0x5f, 0x24, 0xe7, 0xb2, 0x49, 0x0d, - 0xc1, 0x37, 0x23, 0xe9, 0x8b, 0x55, 0x0a, 0xdb, 0xa9, 0xee, 0x65, 0x2e, 0xd5, 0xbd, 0xa0, 0x3b, - 0x50, 0x0d, 0x4f, 0x83, 0xe5, 0x8b, 0x5c, 0xa0, 0x64, 0x56, 0xe4, 0x46, 0xa7, 0x34, 0xcd, 0xe8, - 0x05, 0xdd, 0xe8, 0xe8, 0x2e, 0xcc, 0x93, 0x09, 0x71, 0x02, 0xbf, 0x51, 0x66, 0x11, 0xa3, 0x2a, - 0x73, 0xf7, 0x0e, 0xa5, 0x9a, 0xa2, 0x13, 0x7f, 0x17, 0x16, 0xd9, 0x1d, 0xe9, 0xb1, 0x67, 0x39, - 0xea, 0x65, 0xae, 0xdb, 0xdd, 0x13, 0xe6, 0xa6, 0x3f, 0x51, 0x0d, 0x72, 0x3b, 0xdb, 0xc2, 0x08, - 0xb9, 0x9d, 0x6d, 0xfc, 0x13, 0x03, 0x90, 0x3a, 0x6e, 0x2a, 0x3b, 0xc7, 0x84, 0x4b, 0xf8, 0x7c, - 0x04, 0xbf, 0x0c, 0x73, 0xc4, 0xf3, 0x5c, 0x8f, 0x59, 0xb4, 0x64, 0xf2, 0x06, 0x7e, 0x4b, 0xe8, - 0x60, 0x92, 0x89, 0x7b, 0x1e, 0x9e, 0x41, 0x2e, 0xcd, 0x08, 0x55, 0xdd, 0x85, 0x25, 0x8d, 0x6b, - 0xaa, 0xc8, 0xf5, 0x21, 0x2c, 0x30, 0x61, 0x5b, 0x67, 0xa4, 0x77, 0x3e, 0x72, 0x6d, 0x27, 0x81, - 0x47, 0x57, 0x2e, 0x72, 0xb0, 0x74, 0x1e, 0x7c, 0x62, 0x95, 0x90, 0xd8, 0xed, 0xee, 0xe1, 0xcf, - 0x60, 0x25, 0x26, 0x47, 0xaa, 0xff, 0x27, 0x50, 0xee, 0x85, 0x44, 0x5f, 0xe4, 0x3a, 0xb7, 0x74, - 0xe5, 0xe2, 0x43, 0xd5, 0x11, 0xf8, 0x00, 0xae, 0x27, 0x44, 0x4f, 0x35, 0xe7, 0xb7, 0xe1, 0x1a, - 0x13, 0xb8, 0x4b, 0xc8, 0xa8, 0x3d, 0xb0, 0x27, 0x99, 0x96, 0x1e, 0x89, 0x49, 0x29, 0x8c, 0x5f, - 0xef, 0xbe, 0xc0, 0x7f, 0x24, 0x10, 0xbb, 0xf6, 0x90, 0x74, 0xdd, 0xbd, 0x6c, 0xdd, 0x68, 0x34, - 0x3b, 0x27, 0x97, 0xbe, 0x48, 0x6b, 0xd8, 0x6f, 0xfc, 0x2f, 0x86, 0x30, 0x95, 0x3a, 0xfc, 0x6b, - 0xde, 0xc9, 0xab, 0x00, 0xa7, 0xf4, 0xc8, 0x90, 0x3e, 0xed, 0xe0, 0x15, 0x15, 0x85, 0x12, 0xea, - 0x49, 0xfd, 0x77, 0x45, 0xe8, 0xb9, 0x2c, 0xf6, 0x39, 0xfb, 0x27, 0xf4, 0x72, 0xb7, 0xa0, 0xcc, - 0x08, 0x47, 0x81, 0x15, 0x8c, 0xfd, 0xc4, 0x62, 0xfc, 0x95, 0xd8, 0xf6, 0x72, 0xd0, 0x54, 0xf3, - 0xfa, 0x16, 0xcc, 0xb3, 0xcb, 0x84, 0x4c, 0xa5, 0x6f, 0xa4, 0xec, 0x47, 0xae, 0x87, 0x29, 0x18, - 0xf1, 0xcf, 0x0c, 0x98, 0x7f, 0xca, 0x4a, 0xb0, 0x8a, 0x6a, 0xb3, 0x72, 0x2d, 0x1c, 0x6b, 0xc8, - 0x0b, 0x43, 0x25, 0x93, 0xfd, 0x66, 0xa9, 0x27, 0x21, 0xde, 0x33, 0x73, 0x8f, 0xa7, 0xb8, 0x25, - 0x33, 0x6c, 0x53, 0x9b, 0xf5, 0x06, 0x36, 0x71, 0x02, 0xd6, 0x3b, 0xcb, 0x7a, 0x15, 0x0a, 0xcd, - 0x9e, 0x6d, 0x7f, 0x8f, 0x58, 0x9e, 0x23, 0x8a, 0xa6, 0x45, 0x33, 0x22, 0xe0, 0x3d, 0xa8, 0x73, - 0x3d, 0xda, 0xfd, 0xbe, 0x92, 0x60, 0x86, 0x68, 0x46, 0x0c, 0x4d, 0x93, 0x96, 0x8b, 0x4b, 0xfb, - 0xa5, 0x01, 0x8b, 0x8a, 0xb8, 0xa9, 0xac, 0xfa, 0x2e, 0xcc, 0xf3, 0x22, 0xb5, 0xc8, 0x74, 0x96, - 0xf5, 0x51, 0x1c, 0xc6, 0x14, 0x3c, 0x68, 0x03, 0x0a, 0xfc, 0x97, 0xbc, 0x03, 0xa4, 0xb3, 0x4b, - 0x26, 0x7c, 0x17, 0x96, 0x04, 0x89, 0x0c, 0xdd, 0xb4, 0x83, 0xc1, 0x16, 0x03, 0xff, 0x05, 0x2c, - 0xeb, 0x6c, 0x53, 0x4d, 0x49, 0x51, 0x32, 0xf7, 0x3a, 0x4a, 0xb6, 0xa5, 0x92, 0xcf, 0x46, 0x7d, - 0x25, 0x8f, 0x8a, 0xef, 0x18, 0x75, 0xbd, 0x72, 0xfa, 0x7a, 0x45, 0x13, 0x90, 0x22, 0xbe, 0xd1, - 0x09, 0x7c, 0x20, 0xb7, 0xc3, 0x9e, 0xed, 0x87, 0x3e, 0x1c, 0x43, 0x65, 0x60, 0x3b, 0xc4, 0xf2, - 0x44, 0xe5, 0xdc, 0xe0, 0x95, 0x73, 0x95, 0x86, 0xbf, 0x00, 0xa4, 0x0e, 0xfc, 0x46, 0x95, 0xbe, - 0x27, 0x4d, 0x76, 0xe8, 0xb9, 0x43, 0x37, 0xd3, 0xec, 0xf8, 0x2f, 0xe1, 0x5a, 0x8c, 0xef, 
0x1b, - 0x55, 0x73, 0x09, 0x16, 0xb7, 0x89, 0x4c, 0x68, 0xa4, 0xdb, 0xfb, 0x08, 0x90, 0x4a, 0x9c, 0x2a, - 0xb2, 0xb5, 0x60, 0xf1, 0xa9, 0x3b, 0xa1, 0x2e, 0x92, 0x52, 0x23, 0xdf, 0xc0, 0xeb, 0x10, 0xa1, - 0x29, 0xc2, 0x36, 0x05, 0x57, 0x07, 0x4c, 0x05, 0xfe, 0x1f, 0x06, 0x54, 0xda, 0x03, 0xcb, 0x1b, - 0x4a, 0xe0, 0xef, 0xc1, 0x3c, 0xbf, 0x5d, 0x8b, 0x82, 0xd6, 0x3d, 0x5d, 0x8c, 0xca, 0xcb, 0x1b, - 0x6d, 0x7e, 0x17, 0x17, 0xa3, 0xa8, 0xe2, 0xe2, 0xcd, 0x6b, 0x3b, 0xf6, 0x06, 0xb6, 0x8d, 0xde, - 0x83, 0x39, 0x8b, 0x0e, 0x61, 0xa1, 0xa8, 0x16, 0xaf, 0x6b, 0x30, 0x69, 0xec, 0x0e, 0xc0, 0xb9, - 0xf0, 0x77, 0xa0, 0xac, 0x20, 0xa0, 0x02, 0xe4, 0x1f, 0x77, 0x44, 0xc2, 0xde, 0xde, 0xea, 0xee, - 0x3c, 0xe7, 0x05, 0x9d, 0x1a, 0xc0, 0x76, 0x27, 0x6c, 0xe7, 0xf0, 0xa7, 0x62, 0x94, 0x70, 0xfb, - 0xaa, 0x3e, 0x46, 0x96, 0x3e, 0xb9, 0xd7, 0xd2, 0xe7, 0x02, 0xaa, 0x62, 0xfa, 0xd3, 0x86, 0x31, - 0x26, 0x2f, 0x23, 0x8c, 0x29, 0xca, 0x9b, 0x82, 0x11, 0xff, 0xda, 0x80, 0xfa, 0xb6, 0xfb, 0xd2, - 0x39, 0xf5, 0xac, 0x7e, 0x78, 0x4e, 0x3e, 0x8c, 0xad, 0xd4, 0x46, 0xac, 0x38, 0x1a, 0xe3, 0x8f, - 0x08, 0xb1, 0x15, 0x6b, 0x44, 0x65, 0x43, 0x1e, 0x0b, 0x65, 0x13, 0x7f, 0x00, 0x0b, 0xb1, 0x41, - 0xd4, 0xf6, 0xcf, 0xdb, 0x7b, 0x3b, 0xdb, 0xd4, 0xd6, 0xac, 0xb0, 0xd6, 0xd9, 0x6f, 0x3f, 0xda, - 0xeb, 0x88, 0x07, 0xa4, 0xf6, 0xfe, 0x56, 0x67, 0xaf, 0x9e, 0xc3, 0x3d, 0x58, 0x54, 0xe0, 0xa7, - 0x7d, 0x19, 0xc8, 0xd0, 0x6e, 0x01, 0xaa, 0x22, 0xda, 0x8b, 0x43, 0xf9, 0xef, 0x39, 0xa8, 0x49, - 0xca, 0xd7, 0x83, 0x89, 0x56, 0x60, 0xbe, 0x7f, 0x72, 0x64, 0x7f, 0x21, 0x5f, 0x8e, 0x44, 0x8b, - 0xd2, 0x07, 0x1c, 0x87, 0x3f, 0xdf, 0x8a, 0x16, 0x0d, 0xe3, 0x9e, 0xf5, 0x22, 0xd8, 0x71, 0xfa, - 0xe4, 0x82, 0x25, 0x05, 0xb3, 0x66, 0x44, 0x60, 0x15, 0x26, 0xf1, 0xcc, 0xcb, 0x6e, 0x56, 0xca, - 0xb3, 0x2f, 0xba, 0x0f, 0x75, 0xfa, 0xbb, 0x3d, 0x1a, 0x0d, 0x6c, 0xd2, 0xe7, 0x02, 0x0a, 0x8c, - 0x27, 0x41, 0xa7, 0xe8, 0xec, 0x2e, 0xe2, 0x37, 0x8a, 0x2c, 0x2c, 0x89, 0x16, 0x5a, 0x83, 0x32, - 0xd7, 0x6f, 0xc7, 0x79, 0xe6, 0x13, 0xf6, 0xf6, 0x99, 0x37, 0x55, 0x92, 0x9e, 0x66, 0x40, 0x3c, - 0xcd, 0x58, 0x82, 0xc5, 0xf6, 0x38, 0x38, 0xeb, 0x38, 0x34, 0x56, 0x48, 0x2b, 0x2f, 0x03, 0xa2, - 0xc4, 0x6d, 0xdb, 0x57, 0xa9, 0x82, 0x55, 0x5f, 0x90, 0x0e, 0x2c, 0x51, 0x22, 0x71, 0x02, 0xbb, - 0xa7, 0xc4, 0x55, 0x99, 0x79, 0x19, 0xb1, 0xcc, 0xcb, 0xf2, 0xfd, 0x97, 0xae, 0xd7, 0x17, 0x36, - 0x0f, 0xdb, 0xf8, 0x9f, 0x0c, 0x0e, 0xf9, 0xcc, 0xd7, 0xd2, 0xa7, 0xdf, 0x51, 0x0c, 0x7a, 0x1f, - 0x0a, 0xee, 0x88, 0xbd, 0xf0, 0x8b, 0x32, 0xcc, 0xca, 0x06, 0xff, 0x26, 0x60, 0x43, 0x08, 0x3e, - 0xe0, 0xbd, 0xa6, 0x64, 0x43, 0xf7, 0xa0, 0x76, 0x66, 0xf9, 0x67, 0xa4, 0x7f, 0x28, 0x65, 0xf2, - 0x9b, 0x5f, 0x8c, 0x8a, 0xd7, 0x23, 0xfd, 0x1e, 0x93, 0xe0, 0x0a, 0xfd, 0xf0, 0x03, 0xb8, 0x26, - 0x39, 0xc5, 0xeb, 0xc4, 0x15, 0xcc, 0x2f, 0xe1, 0x96, 0x64, 0xde, 0x3a, 0xb3, 0x9c, 0x53, 0x22, - 0x01, 0x7f, 0x5f, 0x0b, 0x24, 0xe7, 0x93, 0x4f, 0x9d, 0xcf, 0x23, 0x68, 0x84, 0xf3, 0x61, 0x37, - 0x6b, 0x77, 0xa0, 0x2a, 0x3a, 0xf6, 0xc5, 0x79, 0x2a, 0x99, 0xec, 0x37, 0xa5, 0x79, 0xee, 0x20, - 0x4c, 0xa5, 0xe9, 0x6f, 0xbc, 0x05, 0x37, 0xa4, 0x0c, 0x71, 0xe7, 0xd5, 0x85, 0x24, 0x14, 0x4f, - 0x13, 0x22, 0x0c, 0x4b, 0x87, 0x5e, 0xbd, 0xf0, 0x2a, 0xa7, 0xbe, 0x04, 0x4c, 0xa6, 0xa1, 0xc8, - 0xbc, 0xc6, 0x37, 0x25, 0x55, 0x4c, 0xc9, 0x96, 0x24, 0x99, 0x0a, 0x50, 0xc9, 0x62, 0xc1, 0x28, - 0x39, 0xb1, 0x60, 0x09, 0xd1, 0x3f, 0x80, 0xd5, 0x50, 0x09, 0x6a, 0xb7, 0x43, 0xe2, 0x0d, 0x6d, - 0xdf, 0x57, 0xea, 0xde, 0x69, 0x13, 0xbf, 0x07, 0xb3, 0x23, 0x22, 0x82, 0x50, 0x79, 0x13, 0xc9, - 0x4d, 0xa9, 0x0c, 
0x66, 0xfd, 0xb8, 0x0f, 0xb7, 0xa5, 0x74, 0x6e, 0xd1, 0x54, 0xf1, 0x71, 0xa5, - 0x64, 0x35, 0x30, 0x97, 0x51, 0x0d, 0xcc, 0xc7, 0xde, 0x62, 0x3e, 0xe2, 0x86, 0x94, 0x67, 0x7e, - 0xaa, 0xe4, 0x62, 0x97, 0xdb, 0x34, 0x74, 0x15, 0x53, 0x09, 0xfb, 0x1b, 0xe1, 0x05, 0xbe, 0x2a, - 0x0f, 0x4f, 0xd8, 0x0c, 0xe5, 0x43, 0x87, 0x6c, 0xd2, 0xac, 0x99, 0x2e, 0x80, 0xa9, 0xd6, 0x42, - 0x67, 0x4d, 0x8d, 0x86, 0x4f, 0x60, 0x59, 0xf7, 0x6b, 0x53, 0xe9, 0xb2, 0x0c, 0x73, 0x81, 0x7b, - 0x4e, 0x64, 0xac, 0xe1, 0x0d, 0x69, 0xbb, 0xd0, 0xe7, 0x4d, 0x65, 0x3b, 0x2b, 0x12, 0xc6, 0x4e, - 0xc7, 0xb4, 0xfa, 0xd2, 0x8d, 0x25, 0xef, 0x40, 0xbc, 0x81, 0xf7, 0x61, 0x25, 0xee, 0xd9, 0xa6, - 0x52, 0xf9, 0x39, 0x3f, 0x4b, 0x69, 0xce, 0x6f, 0x2a, 0xb9, 0x1f, 0x47, 0x7e, 0x49, 0xf1, 0x6d, - 0x53, 0x89, 0x34, 0xa1, 0x99, 0xe6, 0xea, 0xbe, 0x8a, 0xa3, 0x13, 0x7a, 0xbe, 0xa9, 0x84, 0xf9, - 0x91, 0xb0, 0xe9, 0x97, 0x3f, 0x72, 0x57, 0xf9, 0x2b, 0xdd, 0x95, 0x38, 0x24, 0x91, 0x43, 0xfd, - 0x1a, 0x36, 0x9d, 0xc0, 0x88, 0x7c, 0xf9, 0xb4, 0x18, 0x34, 0x9c, 0x85, 0x18, 0xac, 0x21, 0x37, - 0xb6, 0x1a, 0x01, 0xa6, 0x5a, 0x8c, 0x4f, 0x22, 0x37, 0x9e, 0x08, 0x12, 0x53, 0x09, 0xfe, 0x14, - 0xd6, 0xb2, 0xe3, 0xc3, 0x34, 0x92, 0xef, 0xb7, 0xa0, 0x14, 0x5e, 0x86, 0x94, 0xef, 0xcd, 0xca, - 0x50, 0xd8, 0x3f, 0x38, 0x3a, 0x6c, 0x6f, 0x75, 0xf8, 0x07, 0x67, 0x5b, 0x07, 0xa6, 0xf9, 0xec, - 0xb0, 0x5b, 0xcf, 0x6d, 0xfe, 0x26, 0x0f, 0xb9, 0xdd, 0xe7, 0xe8, 0x33, 0x98, 0xe3, 0x5f, 0x5f, - 0x5c, 0xf1, 0xc9, 0x4d, 0xf3, 0xaa, 0x0f, 0x4c, 0xf0, 0xf5, 0x9f, 0xfc, 0xf7, 0x6f, 0x7e, 0x91, - 0x5b, 0xc4, 0x95, 0xd6, 0xe4, 0xdb, 0xad, 0xf3, 0x49, 0x8b, 0x85, 0xa9, 0x87, 0xc6, 0x7d, 0xf4, - 0x31, 0xe4, 0x0f, 0xc7, 0x01, 0xca, 0xfc, 0x14, 0xa7, 0x99, 0xfd, 0xcd, 0x09, 0xbe, 0xc6, 0x84, - 0x2e, 0x60, 0x10, 0x42, 0x47, 0xe3, 0x80, 0x8a, 0xfc, 0x21, 0x94, 0xd5, 0x2f, 0x46, 0x5e, 0xf9, - 0x7d, 0x4e, 0xf3, 0xd5, 0x5f, 0xa3, 0xe0, 0x5b, 0x0c, 0xea, 0x3a, 0x46, 0x02, 0x8a, 0x7f, 0xd3, - 0xa2, 0xce, 0xa2, 0x7b, 0xe1, 0xa0, 0xcc, 0xaf, 0x77, 0x9a, 0xd9, 0x1f, 0xa8, 0x24, 0x66, 0x11, - 0x5c, 0x38, 0x54, 0xe4, 0x9f, 0x89, 0x6f, 0x53, 0x7a, 0x01, 0xba, 0x9d, 0xf2, 0x6d, 0x82, 0xfa, - 0x0a, 0xdf, 0x5c, 0xcb, 0x66, 0x10, 0x20, 0x37, 0x19, 0xc8, 0x0a, 0x5e, 0x14, 0x20, 0xbd, 0x90, - 0xe5, 0xa1, 0x71, 0x7f, 0xb3, 0x07, 0x73, 0xec, 0x85, 0x0b, 0x7d, 0x2e, 0x7f, 0x34, 0x53, 0x9e, - 0xfa, 0x32, 0x16, 0x5a, 0x7b, 0x1b, 0xc3, 0xcb, 0x0c, 0xa8, 0x86, 0x4b, 0x14, 0x88, 0xbd, 0x6f, - 0x3d, 0x34, 0xee, 0xaf, 0x1b, 0xef, 0x1b, 0x9b, 0xbf, 0x9e, 0x83, 0x39, 0x56, 0xda, 0x45, 0xe7, - 0x00, 0xd1, 0x6b, 0x4f, 0x7c, 0x76, 0x89, 0xf7, 0xa3, 0xf8, 0xec, 0x92, 0x0f, 0x45, 0xb8, 0xc9, - 0x40, 0x97, 0xf1, 0x02, 0x05, 0x65, 0x15, 0xe3, 0x16, 0x2b, 0x82, 0x53, 0x3b, 0xfe, 0xad, 0x21, - 0x2a, 0xdb, 0xfc, 0x2c, 0xa1, 0x34, 0x69, 0xda, 0x93, 0x4f, 0x7c, 0x3b, 0xa4, 0x3c, 0xf7, 0xe0, - 0xef, 0x32, 0xc0, 0x16, 0xae, 0x47, 0x80, 0x1e, 0xe3, 0x78, 0x68, 0xdc, 0xff, 0xbc, 0x81, 0x97, - 0x84, 0x95, 0x63, 0x3d, 0xe8, 0x47, 0x50, 0xd3, 0x9f, 0x34, 0xd0, 0x9d, 0x14, 0xac, 0xf8, 0xcb, - 0x48, 0xf3, 0xad, 0xab, 0x99, 0x84, 0x4e, 0xab, 0x4c, 0x27, 0x01, 0xce, 0x91, 0xcf, 0x09, 0x19, - 0x59, 0x94, 0x49, 0xac, 0x01, 0xfa, 0x47, 0x43, 0xbc, 0x38, 0x45, 0x6f, 0x14, 0x28, 0x4d, 0x7a, - 0xe2, 0x05, 0xa4, 0x79, 0xf7, 0x15, 0x5c, 0x42, 0x89, 0x3f, 0x66, 0x4a, 0x7c, 0x80, 0x97, 0x23, - 0x25, 0x02, 0x7b, 0x48, 0x02, 0x57, 0x68, 0xf1, 0xf9, 0x4d, 0x7c, 0x5d, 0x33, 0x8e, 0xd6, 0x1b, - 0x2d, 0x16, 0x7f, 0x67, 0x48, 0x5d, 0x2c, 0xed, 0xdd, 0x22, 0x75, 0xb1, 0xf4, 0x47, 0x8a, 0xb4, - 0xc5, 0xe2, 0xaf, 0x0a, 0x69, 0x8b, 0x15, 
0xf6, 0x6c, 0xfe, 0xff, 0x2c, 0x14, 0xb6, 0xf8, 0x37, - 0xe1, 0xc8, 0x85, 0x52, 0x58, 0xa6, 0x47, 0xab, 0x69, 0x75, 0xc6, 0xe8, 0x5a, 0xd3, 0xbc, 0x9d, - 0xd9, 0x2f, 0x14, 0x7a, 0x93, 0x29, 0xf4, 0x06, 0x5e, 0xa1, 0xc8, 0xe2, 0xb3, 0xf3, 0x16, 0x2f, - 0x66, 0xb5, 0xac, 0x7e, 0x9f, 0x1a, 0xe2, 0xcf, 0xa1, 0xa2, 0xd6, 0xd1, 0xd1, 0x9b, 0xa9, 0xb5, - 0x4d, 0xb5, 0x14, 0xdf, 0xc4, 0x57, 0xb1, 0x08, 0xe4, 0xb7, 0x18, 0xf2, 0x2a, 0xbe, 0x91, 0x82, - 0xec, 0x31, 0x56, 0x0d, 0x9c, 0xd7, 0xc0, 0xd3, 0xc1, 0xb5, 0x12, 0x7b, 0x3a, 0xb8, 0x5e, 0x42, - 0xbf, 0x12, 0x7c, 0xcc, 0x58, 0x29, 0xb8, 0x0f, 0x10, 0x55, 0xb2, 0x51, 0xaa, 0x2d, 0x95, 0x7b, - 0x5d, 0xdc, 0x39, 0x24, 0x8b, 0xe0, 0x18, 0x33, 0x58, 0xb1, 0xef, 0x62, 0xb0, 0x03, 0xdb, 0x0f, - 0xf8, 0xc1, 0xac, 0x6a, 0xa5, 0x69, 0x94, 0x3a, 0x1f, 0xbd, 0xbe, 0xdd, 0xbc, 0x73, 0x25, 0x8f, - 0x40, 0xbf, 0xcb, 0xd0, 0x6f, 0xe3, 0x66, 0x0a, 0xfa, 0x88, 0xf3, 0xd2, 0xcd, 0xf6, 0xe3, 0x02, - 0x94, 0x9f, 0x5a, 0xb6, 0x13, 0x10, 0xc7, 0x72, 0x7a, 0x04, 0x9d, 0xc0, 0x1c, 0x8b, 0xd4, 0x71, - 0x47, 0xac, 0x96, 0x6d, 0xe3, 0x8e, 0x58, 0xab, 0x69, 0xe2, 0x35, 0x06, 0xdc, 0xc4, 0xd7, 0x28, - 0xf0, 0x30, 0x12, 0xdd, 0x62, 0xa5, 0x48, 0x3a, 0xe9, 0x17, 0x30, 0x2f, 0x5e, 0xfb, 0x62, 0x82, - 0xb4, 0xe2, 0x4f, 0xf3, 0x66, 0x7a, 0x67, 0xda, 0x5e, 0x56, 0x61, 0x7c, 0xc6, 0x47, 0x71, 0x26, - 0x00, 0x51, 0x8d, 0x3d, 0xbe, 0xa2, 0x89, 0x92, 0x7c, 0x73, 0x2d, 0x9b, 0x21, 0xcd, 0xa6, 0x2a, - 0x66, 0x3f, 0xe4, 0xa5, 0xb8, 0x7f, 0x0a, 0xb3, 0x4f, 0x2c, 0xff, 0x0c, 0xc5, 0x62, 0xaf, 0xf2, - 0xad, 0x58, 0xb3, 0x99, 0xd6, 0x25, 0x50, 0x6e, 0x33, 0x94, 0x1b, 0xdc, 0x95, 0xa9, 0x28, 0x67, - 0x96, 0x7f, 0x26, 0xec, 0xc7, 0x3f, 0x1d, 0x8b, 0xdb, 0x4f, 0xfb, 0xfc, 0x2c, 0x6e, 0x3f, 0xfd, - 0x6b, 0xb3, 0x6c, 0xfb, 0x51, 0x94, 0xf3, 0x09, 0xc5, 0x19, 0x41, 0x51, 0x7e, 0xad, 0x85, 0x62, - 0x4f, 0xf7, 0xb1, 0x2f, 0xbb, 0x9a, 0xab, 0x59, 0xdd, 0x02, 0xed, 0x0e, 0x43, 0xbb, 0x85, 0x1b, - 0x89, 0xd5, 0x12, 0x9c, 0x0f, 0x8d, 0xfb, 0xef, 0x1b, 0xe8, 0x47, 0x00, 0xd1, 0xc3, 0x44, 0xe2, - 0x0c, 0xc6, 0xdf, 0x38, 0x12, 0x67, 0x30, 0xf1, 0xa6, 0x81, 0x37, 0x18, 0xee, 0x3a, 0xbe, 0x13, - 0xc7, 0x0d, 0x3c, 0xcb, 0xf1, 0x5f, 0x10, 0xef, 0x3d, 0x5e, 0x67, 0xf5, 0xcf, 0xec, 0x11, 0x9d, - 0xb2, 0x07, 0xa5, 0xb0, 0xee, 0x1c, 0xf7, 0xb7, 0xf1, 0x7a, 0x78, 0xdc, 0xdf, 0x26, 0x0a, 0xd6, - 0xba, 0xe3, 0xd1, 0xf6, 0x8b, 0x64, 0xa5, 0x47, 0xf0, 0x97, 0x75, 0x98, 0xa5, 0x79, 0x37, 0x4d, - 0x4f, 0xa2, 0xca, 0x49, 0x7c, 0xf6, 0x89, 0x3a, 0x6a, 0x7c, 0xf6, 0xc9, 0xa2, 0x8b, 0x9e, 0x9e, - 0xd0, 0x6b, 0x56, 0x8b, 0x17, 0x29, 0xe8, 0x4c, 0x5d, 0x28, 0x2b, 0xa5, 0x15, 0x94, 0x22, 0x4c, - 0x2f, 0xd0, 0xc6, 0x03, 0x5e, 0x4a, 0x5d, 0x06, 0xbf, 0xc1, 0xf0, 0xae, 0xf1, 0x80, 0xc7, 0xf0, - 0xfa, 0x9c, 0x83, 0x02, 0x8a, 0xd9, 0x89, 0x93, 0x9f, 0x32, 0x3b, 0xfd, 0xf4, 0xaf, 0x65, 0x33, - 0x64, 0xce, 0x2e, 0x3a, 0xfa, 0x2f, 0xa1, 0xa2, 0x16, 0x58, 0x50, 0x8a, 0xf2, 0xb1, 0xa2, 0x72, - 0x3c, 0x92, 0xa4, 0xd5, 0x67, 0x74, 0xdf, 0xc6, 0x20, 0x2d, 0x85, 0x8d, 0x02, 0x0f, 0xa0, 0x20, - 0x2a, 0x2e, 0x69, 0x26, 0xd5, 0x0b, 0xd0, 0x69, 0x26, 0x8d, 0x95, 0x6b, 0xf4, 0xfc, 0x99, 0x21, - 0xd2, 0x4b, 0xa5, 0x8c, 0xd6, 0x02, 0xed, 0x31, 0x09, 0xb2, 0xd0, 0xa2, 0x5a, 0x66, 0x16, 0x9a, - 0x72, 0xa1, 0xcf, 0x42, 0x3b, 0x25, 0x81, 0xf0, 0x07, 0xf2, 0xa2, 0x8c, 0x32, 0x84, 0xa9, 0x11, - 0x12, 0x5f, 0xc5, 0x92, 0x76, 0xbd, 0x89, 0x00, 0x65, 0x78, 0xbc, 0x00, 0x88, 0xea, 0x41, 0xf1, - 0x9c, 0x35, 0xb5, 0x0e, 0x1e, 0xcf, 0x59, 0xd3, 0x4b, 0x4a, 0xba, 0x8f, 0x8d, 0x70, 0xf9, 0xed, - 0x8a, 0x22, 0xff, 0xdc, 0x00, 0x94, 0x2c, 0x1d, 0xa1, 0x07, 0xe9, 
0xd2, 0x53, 0xab, 0xeb, 0xcd, - 0x77, 0x5f, 0x8f, 0x39, 0xcd, 0x21, 0x47, 0x2a, 0xf5, 0x18, 0xf7, 0xe8, 0x25, 0x55, 0xea, 0xc7, - 0x06, 0x54, 0xb5, 0xba, 0x13, 0xba, 0x97, 0xb1, 0xa6, 0xb1, 0xa2, 0x7b, 0xf3, 0xed, 0x57, 0xf2, - 0xa5, 0x25, 0xf3, 0xca, 0x0e, 0x90, 0xb7, 0x9a, 0x9f, 0x1a, 0x50, 0xd3, 0xeb, 0x54, 0x28, 0x43, - 0x76, 0xa2, 0x68, 0xdf, 0x5c, 0x7f, 0x35, 0xe3, 0xd5, 0xcb, 0x13, 0x5d, 0x68, 0x06, 0x50, 0x10, - 0x95, 0xad, 0xb4, 0x8d, 0xaf, 0x97, 0xfb, 0xd3, 0x36, 0x7e, 0xac, 0x2c, 0x96, 0xb2, 0xf1, 0x3d, - 0x77, 0x40, 0x94, 0x63, 0x26, 0x4a, 0x5f, 0x59, 0x68, 0x57, 0x1f, 0xb3, 0x58, 0xdd, 0x2c, 0x0b, - 0x2d, 0x3a, 0x66, 0xb2, 0xe6, 0x85, 0x32, 0x84, 0xbd, 0xe2, 0x98, 0xc5, 0x4b, 0x66, 0x29, 0xc7, - 0x8c, 0x01, 0x2a, 0xc7, 0x2c, 0xaa, 0x4e, 0xa5, 0x1d, 0xb3, 0xc4, 0xeb, 0x45, 0xda, 0x31, 0x4b, - 0x16, 0xb8, 0x52, 0xd6, 0x91, 0xe1, 0x6a, 0xc7, 0x6c, 0x29, 0xa5, 0x90, 0x85, 0xde, 0xcd, 0x30, - 0x62, 0xea, 0xa3, 0x48, 0xf3, 0xbd, 0xd7, 0xe4, 0xce, 0xdc, 0xe3, 0xdc, 0xfc, 0x72, 0x8f, 0xff, - 0x9d, 0x01, 0xcb, 0x69, 0x45, 0x30, 0x94, 0x81, 0x93, 0xf1, 0x98, 0xd2, 0xdc, 0x78, 0x5d, 0xf6, - 0xab, 0xad, 0x15, 0xee, 0xfa, 0x47, 0xf5, 0x7f, 0xfb, 0x72, 0xd5, 0xf8, 0xcf, 0x2f, 0x57, 0x8d, - 0xff, 0xf9, 0x72, 0xd5, 0xf8, 0xfb, 0xff, 0x5d, 0x9d, 0x39, 0x99, 0x67, 0xff, 0xd5, 0xf8, 0xdb, - 0xbf, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x54, 0x11, 0xdf, 0xef, 0x3c, 0x00, 0x00, + // 4511 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x3c, 0x5d, 0x6f, 0x1c, 0x47, + 0x72, 0x9c, 0x5d, 0x92, 0xcb, 0xad, 0x5d, 0x2e, 0x97, 0x2d, 0x4a, 0x5a, 0xad, 0x25, 0x8a, 0x1e, + 0x59, 0xb6, 0x2c, 0x5b, 0x5c, 0x89, 0x94, 0xec, 0x44, 0x81, 0x9d, 0x5b, 0x91, 0x6b, 0x89, 0x11, + 0x45, 0xd2, 0xc3, 0x95, 0x7c, 0x56, 0x80, 0x63, 0x86, 0xbb, 0xad, 0xe5, 0x1c, 0x77, 0x67, 0xf6, + 0x66, 0x86, 0x2b, 0xd2, 0x79, 0x38, 0xe7, 0x92, 0xcb, 0xe1, 0x12, 0xe0, 0x80, 0x38, 0x40, 0x70, + 0x08, 0x92, 0x97, 0x20, 0x40, 0xf2, 0x90, 0x04, 0xc9, 0x43, 0x1e, 0x82, 0x04, 0xc8, 0x43, 0xf2, + 0x90, 0x3c, 0x04, 0x08, 0x90, 0x87, 0xbc, 0x26, 0xce, 0x3d, 0xe5, 0x57, 0x1c, 0xfa, 0x6b, 0xba, + 0x67, 0xa6, 0x87, 0x94, 0x8f, 0x34, 0xfc, 0x62, 0xed, 0x74, 0x55, 0x57, 0x55, 0x57, 0x75, 0x55, + 0x75, 0x57, 0xb5, 0x09, 0x45, 0x7f, 0xd8, 0x59, 0x1c, 0xfa, 0x5e, 0xe8, 0xa1, 0x32, 0x0e, 0x3b, + 0xdd, 0x00, 0xfb, 0x23, 0xec, 0x0f, 0x77, 0xeb, 0x73, 0x3d, 0xaf, 0xe7, 0x51, 0x40, 0x83, 0xfc, + 0x62, 0x38, 0xf5, 0x1a, 0xc1, 0x69, 0xd8, 0x43, 0xa7, 0x31, 0x18, 0x75, 0x3a, 0xc3, 0xdd, 0xc6, + 0xfe, 0x88, 0x43, 0xea, 0x11, 0xc4, 0x3e, 0x08, 0xf7, 0x86, 0xbb, 0xf4, 0x1f, 0x0e, 0x5b, 0x88, + 0x60, 0x23, 0xec, 0x07, 0x8e, 0xe7, 0x0e, 0x77, 0xc5, 0x2f, 0x8e, 0x71, 0xb9, 0xe7, 0x79, 0xbd, + 0x3e, 0x66, 0xf3, 0x5d, 0xd7, 0x0b, 0xed, 0xd0, 0xf1, 0xdc, 0x80, 0x43, 0xd9, 0x3f, 0x9d, 0x5b, + 0x3d, 0xec, 0xde, 0xf2, 0x86, 0xd8, 0xb5, 0x87, 0xce, 0x68, 0xa9, 0xe1, 0x0d, 0x29, 0x4e, 0x1a, + 0xdf, 0xfc, 0x89, 0x01, 0x15, 0x0b, 0x07, 0x43, 0xcf, 0x0d, 0xf0, 0x23, 0x6c, 0x77, 0xb1, 0x8f, + 0xae, 0x00, 0x74, 0xfa, 0x07, 0x41, 0x88, 0xfd, 0x1d, 0xa7, 0x5b, 0x33, 0x16, 0x8c, 0x1b, 0xe3, + 0x56, 0x91, 0x8f, 0xac, 0x75, 0xd1, 0x6b, 0x50, 0x1c, 0xe0, 0xc1, 0x2e, 0x83, 0xe6, 0x28, 0x74, + 0x8a, 0x0d, 0xac, 0x75, 0x51, 0x1d, 0xa6, 0x7c, 0x3c, 0x72, 0x88, 0xb8, 0xb5, 0xfc, 0x82, 0x71, + 0x23, 0x6f, 0x45, 0xdf, 0x64, 0xa2, 0x6f, 0xbf, 0x08, 0x77, 0x42, 0xec, 0x0f, 0x6a, 0xe3, 0x6c, + 0x22, 0x19, 0x68, 0x63, 0x7f, 0x70, 0xbf, 0xf0, 0x83, 0xbf, 0xaf, 0xe5, 0x97, 0x17, 0x6f, 0x9b, + 0xff, 0x32, 0x01, 0x65, 0xcb, 0x76, 0x7b, 0xd8, 0xc2, 
0xdf, 0x3b, 0xc0, 0x41, 0x88, 0xaa, 0x90, + 0xdf, 0xc7, 0x47, 0x54, 0x8e, 0xb2, 0x45, 0x7e, 0x32, 0x42, 0x6e, 0x0f, 0xef, 0x60, 0x97, 0x49, + 0x50, 0x26, 0x84, 0xdc, 0x1e, 0x6e, 0xb9, 0x5d, 0x34, 0x07, 0x13, 0x7d, 0x67, 0xe0, 0x84, 0x9c, + 0x3d, 0xfb, 0x88, 0xc9, 0x35, 0x9e, 0x90, 0x6b, 0x05, 0x20, 0xf0, 0xfc, 0x70, 0xc7, 0xf3, 0xbb, + 0xd8, 0xaf, 0x4d, 0x2c, 0x18, 0x37, 0x2a, 0x4b, 0x6f, 0x2c, 0xaa, 0x16, 0x5e, 0x54, 0x05, 0x5a, + 0xdc, 0xf6, 0xfc, 0x70, 0x93, 0xe0, 0x5a, 0xc5, 0x40, 0xfc, 0x44, 0x1f, 0x41, 0x89, 0x12, 0x09, + 0x6d, 0xbf, 0x87, 0xc3, 0xda, 0x24, 0xa5, 0x72, 0xfd, 0x04, 0x2a, 0x6d, 0x8a, 0x6c, 0x51, 0xf6, + 0xec, 0x37, 0x32, 0xa1, 0x1c, 0x60, 0xdf, 0xb1, 0xfb, 0xce, 0x67, 0xf6, 0x6e, 0x1f, 0xd7, 0x0a, + 0x0b, 0xc6, 0x8d, 0x29, 0x2b, 0x36, 0x46, 0xd6, 0xbf, 0x8f, 0x8f, 0x82, 0x1d, 0xcf, 0xed, 0x1f, + 0xd5, 0xa6, 0x28, 0xc2, 0x14, 0x19, 0xd8, 0x74, 0xfb, 0x47, 0xd4, 0x7a, 0xde, 0x81, 0x1b, 0x32, + 0x68, 0x91, 0x42, 0x8b, 0x74, 0x84, 0x82, 0xef, 0x40, 0x75, 0xe0, 0xb8, 0x3b, 0x03, 0xaf, 0xbb, + 0x13, 0x29, 0x04, 0x88, 0x42, 0x1e, 0x14, 0x7e, 0x8f, 0x5a, 0xe0, 0x8e, 0x55, 0x19, 0x38, 0xee, + 0x13, 0xaf, 0x6b, 0x09, 0xfd, 0x90, 0x29, 0xf6, 0x61, 0x7c, 0x4a, 0x29, 0x39, 0xc5, 0x3e, 0x54, + 0xa7, 0xbc, 0x0f, 0xe7, 0x08, 0x97, 0x8e, 0x8f, 0xed, 0x10, 0xcb, 0x59, 0xe5, 0xf8, 0xac, 0xd9, + 0x81, 0xe3, 0xae, 0x50, 0x94, 0xd8, 0x44, 0xfb, 0x30, 0x35, 0x71, 0x3a, 0x39, 0xd1, 0x3e, 0x8c, + 0x4f, 0x34, 0xdf, 0x87, 0x62, 0x64, 0x17, 0x34, 0x05, 0xe3, 0x1b, 0x9b, 0x1b, 0xad, 0xea, 0x18, + 0x02, 0x98, 0x6c, 0x6e, 0xaf, 0xb4, 0x36, 0x56, 0xab, 0x06, 0x2a, 0x41, 0x61, 0xb5, 0xc5, 0x3e, + 0x72, 0xf5, 0xc2, 0x17, 0x7c, 0xbf, 0x3d, 0x06, 0x90, 0xa6, 0x40, 0x05, 0xc8, 0x3f, 0x6e, 0x7d, + 0x5a, 0x1d, 0x23, 0xc8, 0xcf, 0x5a, 0xd6, 0xf6, 0xda, 0xe6, 0x46, 0xd5, 0x20, 0x54, 0x56, 0xac, + 0x56, 0xb3, 0xdd, 0xaa, 0xe6, 0x08, 0xc6, 0x93, 0xcd, 0xd5, 0x6a, 0x1e, 0x15, 0x61, 0xe2, 0x59, + 0x73, 0xfd, 0x69, 0xab, 0x3a, 0x1e, 0x11, 0x93, 0xbb, 0xf8, 0x4f, 0x0c, 0x98, 0xe6, 0xe6, 0x66, + 0xbe, 0x85, 0xee, 0xc2, 0xe4, 0x1e, 0xf5, 0x2f, 0xba, 0x93, 0x4b, 0x4b, 0x97, 0x13, 0x7b, 0x23, + 0xe6, 0x83, 0x16, 0xc7, 0x45, 0x26, 0xe4, 0xf7, 0x47, 0x41, 0x2d, 0xb7, 0x90, 0xbf, 0x51, 0x5a, + 0xaa, 0x2e, 0xb2, 0x48, 0xb2, 0xf8, 0x18, 0x1f, 0x3d, 0xb3, 0xfb, 0x07, 0xd8, 0x22, 0x40, 0x84, + 0x60, 0x7c, 0xe0, 0xf9, 0x98, 0x6e, 0xf8, 0x29, 0x8b, 0xfe, 0x26, 0x5e, 0x40, 0x6d, 0xce, 0x37, + 0x3b, 0xfb, 0x90, 0xe2, 0xfd, 0x87, 0x01, 0xb0, 0x75, 0x10, 0x66, 0xbb, 0xd8, 0x1c, 0x4c, 0x8c, + 0x08, 0x07, 0xee, 0x5e, 0xec, 0x83, 0xfa, 0x16, 0xb6, 0x03, 0x1c, 0xf9, 0x16, 0xf9, 0x40, 0x0b, + 0x50, 0x18, 0xfa, 0x78, 0xb4, 0xb3, 0x3f, 0xa2, 0xdc, 0xa6, 0xa4, 0x9d, 0x26, 0xc9, 0xf8, 0xe3, + 0x11, 0xba, 0x09, 0x65, 0xa7, 0xe7, 0x7a, 0x3e, 0xde, 0x61, 0x44, 0x27, 0x54, 0xb4, 0x25, 0xab, + 0xc4, 0x80, 0x74, 0x49, 0x0a, 0x2e, 0x63, 0x35, 0xa9, 0xc5, 0x5d, 0x27, 0x30, 0xb9, 0x9e, 0xcf, + 0x0d, 0x28, 0xd1, 0xf5, 0x9c, 0x4a, 0xd9, 0x4b, 0x72, 0x21, 0x39, 0x3a, 0x2d, 0xa5, 0xf0, 0xd4, + 0xd2, 0xa4, 0x08, 0x2e, 0xa0, 0x55, 0xdc, 0xc7, 0x21, 0x3e, 0x4d, 0xf0, 0x52, 0x54, 0x99, 0xd7, + 0xaa, 0x52, 0xf2, 0xfb, 0x73, 0x03, 0xce, 0xc5, 0x18, 0x9e, 0x6a, 0xe9, 0x35, 0x28, 0x74, 0x29, + 0x31, 0x26, 0x53, 0xde, 0x12, 0x9f, 0xe8, 0x2e, 0x4c, 0x71, 0x91, 0x82, 0x5a, 0x5e, 0xbf, 0x0d, + 0xa5, 0x94, 0x05, 0x26, 0x65, 0x20, 0xc5, 0xfc, 0xc7, 0x1c, 0x14, 0xb9, 0x32, 0x36, 0x87, 0xa8, + 0x09, 0xd3, 0x3e, 0xfb, 0xd8, 0xa1, 0x6b, 0xe6, 0x32, 0xd6, 0xb3, 0xe3, 0xe4, 0xa3, 0x31, 0xab, + 0xcc, 0xa7, 0xd0, 0x61, 0xf4, 0x2b, 0x50, 0x12, 0x24, 0x86, 0x07, 0x21, 0x37, 
0x54, 0x2d, 0x4e, + 0x40, 0x6e, 0xed, 0x47, 0x63, 0x16, 0x70, 0xf4, 0xad, 0x83, 0x10, 0xb5, 0x61, 0x4e, 0x4c, 0x66, + 0xeb, 0xe3, 0x62, 0xe4, 0x29, 0x95, 0x85, 0x38, 0x95, 0xb4, 0x39, 0x1f, 0x8d, 0x59, 0x88, 0xcf, + 0x57, 0x80, 0x68, 0x55, 0x8a, 0x14, 0x1e, 0xb2, 0xfc, 0x92, 0x12, 0xa9, 0x7d, 0xe8, 0x72, 0x22, + 0x42, 0x5b, 0xcb, 0x8a, 0x6c, 0xed, 0x43, 0x37, 0x52, 0xd9, 0x83, 0x22, 0x14, 0xf8, 0xb0, 0xf9, + 0xef, 0x39, 0x00, 0x61, 0xb1, 0xcd, 0x21, 0x5a, 0x85, 0x8a, 0xcf, 0xbf, 0x62, 0xfa, 0x7b, 0x4d, + 0xab, 0x3f, 0x6e, 0xe8, 0x31, 0x6b, 0x5a, 0x4c, 0x62, 0xe2, 0x7e, 0x08, 0xe5, 0x88, 0x8a, 0x54, + 0xe1, 0x25, 0x8d, 0x0a, 0x23, 0x0a, 0x25, 0x31, 0x81, 0x28, 0xf1, 0x13, 0x38, 0x1f, 0xcd, 0xd7, + 0x68, 0xf1, 0xf5, 0x63, 0xb4, 0x18, 0x11, 0x3c, 0x27, 0x28, 0xa8, 0x7a, 0x7c, 0xa8, 0x08, 0x26, + 0x15, 0x79, 0x49, 0xa3, 0x48, 0x86, 0xa4, 0x6a, 0x32, 0x92, 0x30, 0xa6, 0x4a, 0x20, 0x69, 0x9f, + 0x8d, 0x9b, 0x7f, 0x39, 0x0e, 0x85, 0x15, 0x6f, 0x30, 0xb4, 0x7d, 0xb2, 0x89, 0x26, 0x7d, 0x1c, + 0x1c, 0xf4, 0x43, 0xaa, 0xc0, 0xca, 0xd2, 0xb5, 0x38, 0x0f, 0x8e, 0x26, 0xfe, 0xb5, 0x28, 0xaa, + 0xc5, 0xa7, 0x90, 0xc9, 0x3c, 0xcb, 0xe7, 0x5e, 0x61, 0x32, 0xcf, 0xf1, 0x7c, 0x8a, 0x08, 0x08, + 0x79, 0x19, 0x10, 0xea, 0x50, 0xe0, 0x07, 0x3c, 0x16, 0xac, 0x1f, 0x8d, 0x59, 0x62, 0x00, 0xbd, + 0x0d, 0x33, 0xc9, 0x54, 0x38, 0xc1, 0x71, 0x2a, 0x9d, 0x78, 0xe6, 0xbc, 0x06, 0xe5, 0x58, 0x86, + 0x9e, 0xe4, 0x78, 0xa5, 0x81, 0x92, 0x97, 0x2f, 0x88, 0xb0, 0x4e, 0x8e, 0x15, 0xe5, 0x47, 0x63, + 0x22, 0xb0, 0x5f, 0x15, 0x81, 0x7d, 0x4a, 0x4d, 0xb4, 0x44, 0xaf, 0x3c, 0xc6, 0xbf, 0xa1, 0x46, + 0xad, 0x6f, 0x91, 0xc9, 0x11, 0x92, 0x0c, 0x5f, 0xa6, 0x05, 0xd3, 0x31, 0x95, 0x91, 0x1c, 0xd9, + 0xfa, 0xf8, 0x69, 0x73, 0x9d, 0x25, 0xd4, 0x87, 0x34, 0x87, 0x5a, 0x55, 0x83, 0x24, 0xe8, 0xf5, + 0xd6, 0xf6, 0x76, 0x35, 0x87, 0x2e, 0x40, 0x71, 0x63, 0xb3, 0xbd, 0xc3, 0xb0, 0xf2, 0xf5, 0xc2, + 0x1f, 0xb3, 0x48, 0x22, 0xf3, 0xf3, 0xa7, 0x11, 0x4d, 0x9e, 0xa2, 0x95, 0xcc, 0x3c, 0xa6, 0x64, + 0x66, 0x43, 0x64, 0xe6, 0x9c, 0xcc, 0xcc, 0x79, 0x84, 0x60, 0x62, 0xbd, 0xd5, 0xdc, 0xa6, 0x49, + 0x9a, 0x91, 0x5e, 0x4e, 0x67, 0xeb, 0x07, 0x15, 0x28, 0x33, 0xf3, 0xec, 0x1c, 0xb8, 0xe4, 0x30, + 0xf1, 0x57, 0x06, 0x80, 0x74, 0x58, 0xd4, 0x80, 0x42, 0x87, 0x89, 0x50, 0x33, 0x68, 0x04, 0x3c, + 0xaf, 0xb5, 0xb8, 0x25, 0xb0, 0xd0, 0x1d, 0x28, 0x04, 0x07, 0x9d, 0x0e, 0x0e, 0x44, 0xe6, 0xbe, + 0x98, 0x0c, 0xc2, 0x3c, 0x20, 0x5a, 0x02, 0x8f, 0x4c, 0x79, 0x61, 0x3b, 0xfd, 0x03, 0x9a, 0xc7, + 0x8f, 0x9f, 0xc2, 0xf1, 0x64, 0x8c, 0xfd, 0x33, 0x03, 0x4a, 0x8a, 0x5b, 0xfc, 0x82, 0x29, 0xe0, + 0x32, 0x14, 0xa9, 0x30, 0xb8, 0xcb, 0x93, 0xc0, 0x94, 0x25, 0x07, 0xd0, 0x7b, 0x50, 0x14, 0x9e, + 0x24, 0xf2, 0x40, 0x4d, 0x4f, 0x76, 0x73, 0x68, 0x49, 0x54, 0x29, 0x64, 0x1b, 0x66, 0xa9, 0x9e, + 0x3a, 0xe4, 0xf6, 0x21, 0x34, 0xab, 0x1e, 0xcb, 0x8d, 0xc4, 0xb1, 0xbc, 0x0e, 0x53, 0xc3, 0xbd, + 0xa3, 0xc0, 0xe9, 0xd8, 0x7d, 0x2e, 0x4e, 0xf4, 0x2d, 0xa9, 0x6e, 0x03, 0x52, 0xa9, 0x9e, 0x46, + 0x01, 0x92, 0xe8, 0x05, 0x28, 0x3d, 0xb2, 0x83, 0x3d, 0x2e, 0xa4, 0x1c, 0xbf, 0x0b, 0xd3, 0x64, + 0xfc, 0xf1, 0xb3, 0x57, 0x10, 0x5f, 0xcc, 0x5a, 0x36, 0xff, 0xc9, 0x80, 0x8a, 0x98, 0x76, 0x2a, + 0x03, 0x21, 0x18, 0xdf, 0xb3, 0x83, 0x3d, 0xaa, 0x8c, 0x69, 0x8b, 0xfe, 0x46, 0x6f, 0x43, 0xb5, + 0xc3, 0xd6, 0xbf, 0x93, 0xb8, 0x77, 0xcd, 0xf0, 0xf1, 0xc8, 0xf7, 0xdf, 0x85, 0x69, 0x32, 0x65, + 0x27, 0x7e, 0x0f, 0x12, 0x6e, 0xfc, 0x9e, 0x55, 0xde, 0xa3, 0x6b, 0x4e, 0x8a, 0x6f, 0x43, 0x99, + 0x29, 0xe3, 0xac, 0x65, 0x97, 0x7a, 0xad, 0xc3, 0xcc, 0xb6, 0x6b, 0x0f, 0x83, 0x3d, 0x2f, 0x4c, + 0xe8, 
0x7c, 0xd9, 0xfc, 0x3b, 0x03, 0xaa, 0x12, 0x78, 0x2a, 0x19, 0xde, 0x82, 0x19, 0x1f, 0x0f, + 0x6c, 0xc7, 0x75, 0xdc, 0xde, 0xce, 0xee, 0x51, 0x88, 0x03, 0x7e, 0x7d, 0xad, 0x44, 0xc3, 0x0f, + 0xc8, 0x28, 0x11, 0x76, 0xb7, 0xef, 0xed, 0xf2, 0x20, 0x4d, 0x7f, 0xa3, 0xd7, 0xe3, 0x51, 0xba, + 0x28, 0xf5, 0x26, 0xc6, 0xa5, 0xcc, 0x3f, 0xcd, 0x41, 0xf9, 0x13, 0x3b, 0xec, 0x88, 0x1d, 0x84, + 0xd6, 0xa0, 0x12, 0x85, 0x71, 0x3a, 0xc2, 0xe5, 0x4e, 0x1c, 0x38, 0xe8, 0x1c, 0x71, 0xaf, 0x11, + 0x07, 0x8e, 0xe9, 0x8e, 0x3a, 0x40, 0x49, 0xd9, 0x6e, 0x07, 0xf7, 0x23, 0x52, 0xb9, 0x6c, 0x52, + 0x14, 0x51, 0x25, 0xa5, 0x0e, 0xa0, 0x6f, 0x43, 0x75, 0xe8, 0x7b, 0x3d, 0x1f, 0x07, 0x41, 0x44, + 0x8c, 0xa5, 0x70, 0x53, 0x43, 0x6c, 0x8b, 0xa3, 0x26, 0x4e, 0x31, 0x77, 0x1f, 0x8d, 0x59, 0x33, + 0xc3, 0x38, 0x4c, 0x06, 0xd6, 0x19, 0x79, 0xde, 0x63, 0x91, 0xf5, 0x47, 0x79, 0x40, 0xe9, 0x65, + 0x7e, 0xd5, 0x63, 0xf2, 0x75, 0xa8, 0x04, 0xa1, 0xed, 0xa7, 0xf6, 0xfc, 0x34, 0x1d, 0x8d, 0x76, + 0xfc, 0x5b, 0x10, 0x49, 0xb6, 0xe3, 0x7a, 0xa1, 0xf3, 0xe2, 0x88, 0x5d, 0x50, 0xac, 0x8a, 0x18, + 0xde, 0xa0, 0xa3, 0x68, 0x03, 0x0a, 0x2f, 0x9c, 0x7e, 0x88, 0xfd, 0xa0, 0x36, 0xb1, 0x90, 0xbf, + 0x51, 0x59, 0x7a, 0xe7, 0x24, 0xc3, 0x2c, 0x7e, 0x44, 0xf1, 0xdb, 0x47, 0x43, 0xf5, 0xf4, 0xcb, + 0x89, 0xa8, 0xc7, 0xf8, 0x49, 0xfd, 0x8d, 0xc8, 0x84, 0xa9, 0x97, 0x84, 0xe8, 0x8e, 0xd3, 0xa5, + 0xb9, 0x38, 0xf2, 0xc3, 0xbb, 0x56, 0x81, 0x02, 0xd6, 0xba, 0xe8, 0x1a, 0x4c, 0xbd, 0xf0, 0xed, + 0xde, 0x00, 0xbb, 0x21, 0xbb, 0xe5, 0x4b, 0x9c, 0x08, 0x60, 0x2e, 0x02, 0x48, 0x51, 0x48, 0xe6, + 0xdb, 0xd8, 0xdc, 0x7a, 0xda, 0xae, 0x8e, 0xa1, 0x32, 0x4c, 0x6d, 0x6c, 0xae, 0xb6, 0xd6, 0x5b, + 0x24, 0x37, 0x8a, 0x9c, 0x77, 0x47, 0x3a, 0x5d, 0x53, 0x18, 0x22, 0xb6, 0x27, 0x54, 0xb9, 0x8c, + 0xf8, 0xa5, 0x5b, 0xc8, 0x25, 0x48, 0xdc, 0x31, 0xaf, 0xc2, 0x9c, 0x6e, 0x6b, 0x08, 0x84, 0xbb, + 0xe6, 0xbf, 0xe6, 0x60, 0x9a, 0x3b, 0xc2, 0xa9, 0x3c, 0xf7, 0x92, 0x22, 0x15, 0xbf, 0x9e, 0x08, + 0x25, 0xd5, 0xa0, 0xc0, 0x1c, 0xa4, 0xcb, 0xef, 0xbf, 0xe2, 0x93, 0x04, 0x67, 0xb6, 0xdf, 0x71, + 0x97, 0x9b, 0x3d, 0xfa, 0xd6, 0x86, 0xcd, 0x89, 0xcc, 0xb0, 0x19, 0x39, 0x9c, 0x1d, 0xf0, 0x83, + 0x55, 0x51, 0x9a, 0xa2, 0x2c, 0x9c, 0x8a, 0x00, 0x63, 0x36, 0x2b, 0x64, 0xd8, 0x0c, 0x5d, 0x87, + 0x49, 0x3c, 0xc2, 0x6e, 0x18, 0xd4, 0x4a, 0x34, 0x91, 0x4e, 0x8b, 0x0b, 0x55, 0x8b, 0x8c, 0x5a, + 0x1c, 0x28, 0x4d, 0xf5, 0x21, 0xcc, 0xd2, 0xfb, 0xee, 0x43, 0xdf, 0x76, 0xd5, 0x3b, 0x7b, 0xbb, + 0xbd, 0xce, 0xd3, 0x0e, 0xf9, 0x89, 0x2a, 0x90, 0x5b, 0x5b, 0xe5, 0xfa, 0xc9, 0xad, 0xad, 0xca, + 0xf9, 0xbf, 0x6f, 0x00, 0x52, 0x09, 0x9c, 0xca, 0x16, 0x09, 0x2e, 0x42, 0x8e, 0xbc, 0x94, 0x63, + 0x0e, 0x26, 0xb0, 0xef, 0x7b, 0x3e, 0x0b, 0x94, 0x16, 0xfb, 0x90, 0xd2, 0xdc, 0xe2, 0xc2, 0x58, + 0x78, 0xe4, 0xed, 0x47, 0x11, 0x80, 0x91, 0x35, 0xd2, 0xc2, 0xb7, 0xe1, 0x5c, 0x0c, 0xfd, 0x6c, + 0x52, 0xfc, 0x26, 0xcc, 0x50, 0xaa, 0x2b, 0x7b, 0xb8, 0xb3, 0x3f, 0xf4, 0x1c, 0x37, 0x25, 0x01, + 0xba, 0x46, 0x62, 0x97, 0x48, 0x17, 0x64, 0x89, 0x6c, 0xcd, 0xe5, 0x68, 0xb0, 0xdd, 0x5e, 0x97, + 0x5b, 0x7d, 0x17, 0x2e, 0x24, 0x08, 0x8a, 0x95, 0xfd, 0x2a, 0x94, 0x3a, 0xd1, 0x60, 0xc0, 0x4f, + 0x90, 0x57, 0xe2, 0xe2, 0x26, 0xa7, 0xaa, 0x33, 0x24, 0x8f, 0x6f, 0xc3, 0xc5, 0x14, 0x8f, 0xb3, + 0x50, 0xc7, 0x5d, 0xf3, 0x36, 0x9c, 0xa7, 0x94, 0x1f, 0x63, 0x3c, 0x6c, 0xf6, 0x9d, 0xd1, 0xc9, + 0x66, 0x39, 0xe2, 0xeb, 0x55, 0x66, 0x7c, 0xbd, 0xdb, 0x4a, 0xb2, 0x6e, 0x71, 0xd6, 0x6d, 0x67, + 0x80, 0xdb, 0xde, 0x7a, 0xb6, 0xb4, 0x24, 0x91, 0xef, 0xe3, 0xa3, 0x80, 0x1f, 0x1f, 0xe9, 0x6f, + 0x19, 0xbd, 0xfe, 0xc6, 0xe0, 
0xea, 0x54, 0xe9, 0x7c, 0xcd, 0xae, 0x31, 0x0f, 0xd0, 0x23, 0x3e, + 0x88, 0xbb, 0x04, 0xc0, 0x6a, 0x73, 0xca, 0x48, 0x24, 0x30, 0xc9, 0x42, 0xe5, 0xa4, 0xc0, 0x57, + 0xb8, 0xe3, 0xd0, 0xff, 0x04, 0xa9, 0x93, 0xd2, 0x9b, 0x50, 0xa2, 0x90, 0xed, 0xd0, 0x0e, 0x0f, + 0x82, 0x2c, 0xcb, 0x2d, 0x9b, 0x3f, 0x32, 0xb8, 0x47, 0x09, 0x3a, 0xa7, 0x5a, 0xf3, 0x1d, 0x98, + 0xa4, 0x37, 0x44, 0x71, 0xd3, 0xb9, 0xa4, 0xd9, 0xd8, 0x4c, 0x22, 0x8b, 0x23, 0x2a, 0xe7, 0x24, + 0x03, 0x26, 0x9f, 0xd0, 0xce, 0x81, 0x22, 0xed, 0xb8, 0xb0, 0x9c, 0x6b, 0x0f, 0x58, 0xf9, 0xb1, + 0x68, 0xd1, 0xdf, 0xf4, 0x42, 0x80, 0xb1, 0xff, 0xd4, 0x5a, 0x67, 0x37, 0x90, 0xa2, 0x15, 0x7d, + 0x13, 0xc5, 0x76, 0xfa, 0x0e, 0x76, 0x43, 0x0a, 0x1d, 0xa7, 0x50, 0x65, 0x04, 0x5d, 0x87, 0xa2, + 0x13, 0xac, 0x63, 0xdb, 0x77, 0x79, 0x89, 0x5f, 0x09, 0xcc, 0x12, 0x22, 0xf7, 0xd8, 0x77, 0xa0, + 0xca, 0x24, 0x6b, 0x76, 0xbb, 0xca, 0x69, 0x3f, 0xe2, 0x6f, 0x24, 0xf8, 0xc7, 0xe8, 0xe7, 0x4e, + 0xa6, 0xff, 0xb7, 0x06, 0xcc, 0x2a, 0x0c, 0x4e, 0x65, 0x82, 0x77, 0x61, 0x92, 0xf5, 0x5f, 0xf8, + 0x51, 0x70, 0x2e, 0x3e, 0x8b, 0xb1, 0xb1, 0x38, 0x0e, 0x5a, 0x84, 0x02, 0xfb, 0x25, 0xae, 0x71, + 0x7a, 0x74, 0x81, 0x24, 0x45, 0x5e, 0x84, 0x73, 0x1c, 0x86, 0x07, 0x9e, 0xce, 0xe7, 0xc6, 0xe3, + 0x11, 0xe2, 0x87, 0x06, 0xcc, 0xc5, 0x27, 0x9c, 0x6a, 0x95, 0x8a, 0xdc, 0xb9, 0xaf, 0x24, 0xf7, + 0xaf, 0x09, 0xb9, 0x9f, 0x0e, 0xbb, 0xca, 0x91, 0x33, 0xb9, 0xe3, 0x54, 0xeb, 0xe6, 0xe2, 0xd6, + 0x95, 0xb4, 0x7e, 0x12, 0xad, 0x49, 0x10, 0x3b, 0xd5, 0x9a, 0xde, 0x7f, 0xa5, 0x35, 0x29, 0x47, + 0xb0, 0xd4, 0xe2, 0xd6, 0xc4, 0x36, 0x5a, 0x77, 0x82, 0x28, 0xe3, 0xbc, 0x03, 0xe5, 0xbe, 0xe3, + 0x62, 0xdb, 0xe7, 0x3d, 0x24, 0x43, 0xdd, 0x8f, 0xf7, 0xac, 0x18, 0x50, 0x92, 0xfa, 0x6d, 0x03, + 0x90, 0x4a, 0xeb, 0x9b, 0xb1, 0x56, 0x43, 0x28, 0x78, 0xcb, 0xf7, 0x06, 0x5e, 0x78, 0xd2, 0x36, + 0xbb, 0x6b, 0xfe, 0xae, 0x01, 0xe7, 0x13, 0x33, 0xbe, 0x09, 0xc9, 0xef, 0x9a, 0x97, 0x61, 0x76, + 0x15, 0x8b, 0x33, 0x5e, 0xaa, 0x76, 0xb0, 0x0d, 0x48, 0x85, 0x9e, 0xcd, 0x29, 0xe6, 0x97, 0x60, + 0xf6, 0x89, 0x37, 0x22, 0x81, 0x9c, 0x80, 0x65, 0x98, 0x62, 0xc5, 0xac, 0x48, 0x5f, 0xd1, 0xb7, + 0x0c, 0xbd, 0xdb, 0x80, 0xd4, 0x99, 0x67, 0x21, 0xce, 0xb2, 0xf9, 0xbf, 0x06, 0x94, 0x9b, 0x7d, + 0xdb, 0x1f, 0x08, 0x51, 0x3e, 0x84, 0x49, 0x56, 0x99, 0xe1, 0x65, 0xd6, 0x37, 0xe3, 0xf4, 0x54, + 0x5c, 0xf6, 0xd1, 0x64, 0x75, 0x1c, 0x3e, 0x8b, 0x2c, 0x85, 0x77, 0x96, 0x57, 0x13, 0x9d, 0xe6, + 0x55, 0x74, 0x0b, 0x26, 0x6c, 0x32, 0x85, 0xa6, 0xd7, 0x4a, 0xb2, 0x5c, 0x46, 0xa9, 0x91, 0x2b, + 0x91, 0xc5, 0xb0, 0xcc, 0x0f, 0xa0, 0xa4, 0x70, 0x40, 0x05, 0xc8, 0x3f, 0x6c, 0xf1, 0x6b, 0x52, + 0x73, 0xa5, 0xbd, 0xf6, 0x8c, 0x95, 0x10, 0x2b, 0x00, 0xab, 0xad, 0xe8, 0x3b, 0xa7, 0x69, 0xec, + 0xd9, 0x9c, 0x0e, 0xcf, 0x5b, 0xaa, 0x84, 0x46, 0x96, 0x84, 0xb9, 0x57, 0x91, 0x50, 0xb2, 0xf8, + 0x2d, 0x03, 0xa6, 0xb9, 0x6a, 0x4e, 0x9b, 0x9a, 0x29, 0xe5, 0x8c, 0xd4, 0xac, 0x2c, 0xc3, 0xe2, + 0x88, 0x52, 0x86, 0x7f, 0x36, 0xa0, 0xba, 0xea, 0xbd, 0x74, 0x7b, 0xbe, 0xdd, 0x8d, 0x7c, 0xf0, + 0xa3, 0x84, 0x39, 0x17, 0x13, 0x95, 0xfe, 0x04, 0xbe, 0x1c, 0x48, 0x98, 0xb5, 0x26, 0x6b, 0x29, + 0x2c, 0xbf, 0x8b, 0x4f, 0xf3, 0x5b, 0x30, 0x93, 0x98, 0x44, 0x0c, 0xf4, 0xac, 0xb9, 0xbe, 0xb6, + 0x4a, 0x0c, 0x42, 0xeb, 0xbd, 0xad, 0x8d, 0xe6, 0x83, 0xf5, 0x16, 0xef, 0xca, 0x36, 0x37, 0x56, + 0x5a, 0xeb, 0xd2, 0x50, 0xf7, 0xc4, 0x0a, 0xee, 0x99, 0x7d, 0x98, 0x55, 0x04, 0x3a, 0x6d, 0x73, + 0x4c, 0x2f, 0xaf, 0xe4, 0x56, 0x83, 0x69, 0x7e, 0xca, 0x49, 0x3a, 0xfe, 0x7f, 0xe7, 0xa1, 0x22, + 0x40, 0x5f, 0x8f, 0x14, 0xe8, 0x02, 0x4c, 0x76, 0x77, 
0xb7, 0x9d, 0xcf, 0x44, 0x5f, 0x96, 0x7f, + 0x91, 0xf1, 0x3e, 0xe3, 0xc3, 0x5e, 0x5b, 0xf0, 0x2f, 0x74, 0x99, 0x3d, 0xc4, 0x58, 0x73, 0xbb, + 0xf8, 0x90, 0x1e, 0x86, 0xc6, 0x2d, 0x39, 0x40, 0x8b, 0x9a, 0xfc, 0x55, 0x06, 0xbd, 0xeb, 0x2a, + 0xaf, 0x34, 0xd0, 0x32, 0x54, 0xc9, 0xef, 0xe6, 0x70, 0xd8, 0x77, 0x70, 0x97, 0x11, 0x20, 0xd7, + 0xdc, 0x71, 0x79, 0xda, 0x49, 0x21, 0xa0, 0xab, 0x30, 0x49, 0xaf, 0x80, 0x41, 0x6d, 0x8a, 0xe4, + 0x55, 0x89, 0xca, 0x87, 0xd1, 0xdb, 0x50, 0x62, 0x12, 0xaf, 0xb9, 0x4f, 0x03, 0x4c, 0xdf, 0x2c, + 0x28, 0xf5, 0x10, 0x15, 0x16, 0x3f, 0x67, 0x41, 0xd6, 0x39, 0x0b, 0x35, 0xa0, 0x12, 0x84, 0x9e, + 0x6f, 0xf7, 0xf0, 0x33, 0xae, 0xb2, 0x52, 0xbc, 0x68, 0x97, 0x00, 0x4b, 0x11, 0x3e, 0x3e, 0xf0, + 0x42, 0x3b, 0xfe, 0x50, 0xe1, 0x3d, 0x4b, 0x85, 0x49, 0xcb, 0x5e, 0x86, 0xd9, 0xe6, 0x41, 0xb8, + 0xd7, 0x72, 0x49, 0x1e, 0x4d, 0xd9, 0xfd, 0x0a, 0x20, 0x02, 0x5d, 0x75, 0x02, 0x2d, 0x98, 0x4f, + 0xd6, 0x6e, 0x9a, 0x7b, 0xe6, 0x06, 0x9c, 0x23, 0x50, 0xec, 0x86, 0x4e, 0x47, 0x39, 0xb3, 0x88, + 0x53, 0xb1, 0x91, 0x38, 0x15, 0xdb, 0x41, 0xf0, 0xd2, 0xf3, 0xbb, 0x7c, 0x5f, 0x44, 0xdf, 0x92, + 0xdb, 0x3f, 0x18, 0x4c, 0x9a, 0xa7, 0x41, 0xec, 0x44, 0xfb, 0x15, 0xe9, 0xa1, 0x5f, 0x86, 0x02, + 0x7f, 0x49, 0xc4, 0x0b, 0x85, 0x17, 0x16, 0xd9, 0x0b, 0xa6, 0x45, 0x4e, 0x78, 0x93, 0x41, 0x95, + 0x62, 0x16, 0xc7, 0x27, 0x16, 0xd9, 0xb3, 0x83, 0x3d, 0xdc, 0xdd, 0x12, 0xc4, 0x63, 0x65, 0xd4, + 0x7b, 0x56, 0x02, 0x2c, 0x65, 0xbf, 0x23, 0x45, 0x7f, 0x88, 0xc3, 0x63, 0x44, 0x57, 0x0b, 0xf5, + 0xe7, 0xc5, 0x14, 0xde, 0x5f, 0x7c, 0x95, 0x59, 0x3f, 0x36, 0xe0, 0x8a, 0x98, 0xb6, 0xb2, 0x67, + 0xbb, 0x3d, 0x2c, 0x84, 0xf9, 0x45, 0xf5, 0x95, 0x5e, 0x74, 0xfe, 0x15, 0x17, 0xfd, 0x18, 0x6a, + 0xd1, 0xa2, 0x69, 0xd1, 0xc6, 0xeb, 0xab, 0x8b, 0x38, 0x08, 0x78, 0xf0, 0x28, 0x5a, 0xf4, 0x37, + 0x19, 0xf3, 0xbd, 0x7e, 0x74, 0x5f, 0x22, 0xbf, 0x25, 0xb1, 0x75, 0xb8, 0x24, 0x88, 0xf1, 0x2a, + 0x4a, 0x9c, 0x5a, 0x6a, 0x4d, 0xc7, 0x52, 0xe3, 0xf6, 0x20, 0x34, 0x8e, 0xdf, 0x4a, 0xda, 0x29, + 0x71, 0x13, 0x52, 0x2e, 0x86, 0x8e, 0xcb, 0x3c, 0xf3, 0x00, 0x22, 0xb3, 0x72, 0xb4, 0x4d, 0xc1, + 0x09, 0x49, 0x2d, 0x9c, 0x6f, 0x01, 0x02, 0x4f, 0x6d, 0x81, 0x6c, 0xae, 0x18, 0xe6, 0x23, 0x41, + 0x89, 0xda, 0xb7, 0xb0, 0x3f, 0x70, 0x82, 0x40, 0xe9, 0x58, 0xe9, 0xd4, 0xf5, 0x26, 0x8c, 0x0f, + 0x31, 0xcf, 0xf3, 0xa5, 0x25, 0x24, 0x7c, 0x42, 0x99, 0x4c, 0xe1, 0x92, 0xcd, 0x00, 0xae, 0x0a, + 0x36, 0xcc, 0x20, 0x5a, 0x3e, 0x49, 0x31, 0x45, 0x95, 0x3c, 0x97, 0x51, 0x25, 0xcf, 0xc7, 0xab, + 0xe4, 0xb1, 0xb3, 0xa7, 0x1a, 0xa8, 0xce, 0xe6, 0xec, 0xd9, 0x66, 0x06, 0x88, 0xe2, 0xdb, 0xd9, + 0x50, 0xfd, 0x03, 0x1e, 0xa8, 0xce, 0x2a, 0x63, 0x62, 0xba, 0x66, 0xd1, 0xcf, 0x14, 0x9f, 0xc8, + 0x84, 0x32, 0x31, 0x92, 0xa5, 0xb6, 0x0f, 0xc6, 0xad, 0xd8, 0x98, 0x0c, 0xc6, 0xfb, 0x30, 0x17, + 0x0f, 0xc6, 0xa7, 0x12, 0x6a, 0x0e, 0x26, 0x42, 0x6f, 0x1f, 0x8b, 0x24, 0xce, 0x3e, 0x52, 0x6a, + 0x8d, 0x02, 0xf5, 0xd9, 0xa8, 0xf5, 0xbb, 0x92, 0x2a, 0x75, 0xc0, 0xd3, 0xae, 0x80, 0x6c, 0x47, + 0x71, 0x4d, 0x66, 0x1f, 0x92, 0xd7, 0x27, 0x70, 0x21, 0x19, 0x7c, 0xcf, 0x66, 0x11, 0x3b, 0xcc, + 0x39, 0x75, 0xe1, 0xf9, 0x6c, 0x18, 0x3c, 0x97, 0x71, 0x52, 0x09, 0xba, 0x67, 0x43, 0xfb, 0xd7, + 0xa1, 0xae, 0x8b, 0xc1, 0x67, 0xea, 0x8b, 0x51, 0x48, 0x3e, 0x1b, 0xaa, 0x3f, 0x34, 0x24, 0x59, + 0x75, 0xd7, 0x7c, 0xf0, 0x55, 0xc8, 0x8a, 0x5c, 0x77, 0x3b, 0xda, 0x3e, 0x8d, 0x28, 0x5a, 0xe6, + 0xf5, 0xd1, 0x52, 0x4e, 0xa1, 0x88, 0xc2, 0xff, 0x64, 0xa8, 0xff, 0x3a, 0x77, 0x2f, 0x67, 0x26, + 0xf3, 0xce, 0x69, 0x99, 0x91, 0xf4, 0x1c, 0x31, 0xa3, 0x1f, 0x29, 0x57, 0x51, 
0x93, 0xd4, 0xd9, + 0x98, 0xee, 0x37, 0x64, 0x82, 0x49, 0xe5, 0xb1, 0xb3, 0xe1, 0x60, 0xc3, 0x42, 0x76, 0x0a, 0x3b, + 0x13, 0x16, 0x37, 0x9b, 0x50, 0x8c, 0x2e, 0xc9, 0xca, 0x93, 0xde, 0x12, 0x14, 0x36, 0x36, 0xb7, + 0xb7, 0x9a, 0x2b, 0xe4, 0x0e, 0x38, 0x07, 0x85, 0x95, 0x4d, 0xcb, 0x7a, 0xba, 0xd5, 0x26, 0x97, + 0xc0, 0xe4, 0x0b, 0x9f, 0xa5, 0x9f, 0xe5, 0x21, 0xf7, 0xf8, 0x19, 0xfa, 0x14, 0x26, 0xd8, 0x0b, + 0xb3, 0x63, 0x1e, 0x1a, 0xd6, 0x8f, 0x7b, 0x44, 0x67, 0x5e, 0xfc, 0xc1, 0x7f, 0xfd, 0xec, 0x0f, + 0x73, 0xb3, 0x66, 0xb9, 0x31, 0x5a, 0x6e, 0xec, 0x8f, 0x1a, 0x34, 0xc9, 0xde, 0x37, 0x6e, 0xa2, + 0x8f, 0x21, 0xbf, 0x75, 0x10, 0xa2, 0xcc, 0x07, 0x88, 0xf5, 0xec, 0x77, 0x75, 0xe6, 0x79, 0x4a, + 0x74, 0xc6, 0x04, 0x4e, 0x74, 0x78, 0x10, 0x12, 0x92, 0xdf, 0x83, 0x92, 0xfa, 0x2a, 0xee, 0xc4, + 0x57, 0x89, 0xf5, 0x93, 0x5f, 0xdc, 0x99, 0x57, 0x28, 0xab, 0x8b, 0x26, 0xe2, 0xac, 0xd8, 0xbb, + 0x3d, 0x75, 0x15, 0xed, 0x43, 0x17, 0x65, 0xbe, 0x59, 0xac, 0x67, 0x3f, 0xc2, 0x4b, 0xad, 0x22, + 0x3c, 0x74, 0x09, 0xc9, 0xef, 0xf2, 0xd7, 0x76, 0x9d, 0x10, 0x5d, 0xd5, 0x3c, 0x97, 0x52, 0x9f, + 0x01, 0xd5, 0x17, 0xb2, 0x11, 0x38, 0x93, 0xcb, 0x94, 0xc9, 0x05, 0x73, 0x96, 0x33, 0xe9, 0x44, + 0x28, 0xf7, 0x8d, 0x9b, 0x4b, 0x1d, 0x98, 0xa0, 0x6d, 0x66, 0xf4, 0x5c, 0xfc, 0xa8, 0x6b, 0x1a, + 0xf8, 0x19, 0x86, 0x8e, 0x35, 0xa8, 0xcd, 0x39, 0xca, 0xa8, 0x62, 0x16, 0x09, 0x23, 0xda, 0x64, + 0xbe, 0x6f, 0xdc, 0xbc, 0x61, 0xdc, 0x36, 0x96, 0xfe, 0x7a, 0x02, 0x26, 0x68, 0x3b, 0x03, 0xed, + 0x03, 0xc8, 0x76, 0x6a, 0x72, 0x75, 0xa9, 0x4e, 0x6d, 0x72, 0x75, 0xe9, 0x4e, 0xac, 0x59, 0xa7, + 0x4c, 0xe7, 0xcc, 0x19, 0xc2, 0x94, 0x76, 0x49, 0x1a, 0xb4, 0x29, 0x44, 0xf4, 0xf8, 0x63, 0x83, + 0xf7, 0x75, 0x98, 0x9b, 0x21, 0x1d, 0xb5, 0x58, 0x2b, 0x35, 0xb9, 0x1d, 0x34, 0xdd, 0x53, 0xf3, + 0x1e, 0x65, 0xd8, 0x30, 0xab, 0x92, 0xa1, 0x4f, 0x31, 0xee, 0x1b, 0x37, 0x9f, 0xd7, 0xcc, 0x73, + 0x5c, 0xcb, 0x09, 0x08, 0xfa, 0x3e, 0x54, 0xe2, 0x4d, 0x3f, 0x74, 0x4d, 0xc3, 0x2b, 0xd9, 0x44, + 0xac, 0xbf, 0x71, 0x3c, 0x12, 0x97, 0x69, 0x9e, 0xca, 0xc4, 0x99, 0x33, 0xce, 0xfb, 0x18, 0x0f, + 0x6d, 0x82, 0xc4, 0x6d, 0x80, 0xfe, 0xd4, 0xe0, 0x7d, 0x5b, 0xd9, 0xb3, 0x43, 0x3a, 0xea, 0xa9, + 0xd6, 0x60, 0xfd, 0xfa, 0x09, 0x58, 0x5c, 0x88, 0x0f, 0xa8, 0x10, 0xef, 0x9b, 0x73, 0x52, 0x88, + 0xd0, 0x19, 0xe0, 0xd0, 0xe3, 0x52, 0x3c, 0xbf, 0x6c, 0x5e, 0x8c, 0x29, 0x27, 0x06, 0x95, 0xc6, + 0x62, 0xbd, 0x35, 0xad, 0xb1, 0x62, 0xed, 0x3b, 0xad, 0xb1, 0xe2, 0x8d, 0x39, 0x9d, 0xb1, 0x78, + 0x27, 0x4d, 0x63, 0xac, 0x08, 0xb2, 0xf4, 0xff, 0xe3, 0x50, 0x58, 0x61, 0xff, 0xd7, 0x0e, 0xf2, + 0xa0, 0x18, 0x75, 0x9b, 0xd0, 0xbc, 0xae, 0xa0, 0x2d, 0xaf, 0x72, 0xf5, 0xab, 0x99, 0x70, 0x2e, + 0xd0, 0xeb, 0x54, 0xa0, 0xd7, 0xcc, 0x0b, 0x84, 0x33, 0xff, 0x1f, 0x83, 0x1a, 0xac, 0xec, 0xd9, + 0xb0, 0xbb, 0x5d, 0xa2, 0x88, 0xdf, 0x84, 0xb2, 0xda, 0xfb, 0x41, 0xaf, 0x6b, 0x8b, 0xe8, 0x6a, + 0x23, 0xa9, 0x6e, 0x1e, 0x87, 0xc2, 0x39, 0xbf, 0x41, 0x39, 0xcf, 0x9b, 0x97, 0x34, 0x9c, 0x7d, + 0x8a, 0x1a, 0x63, 0xce, 0x9a, 0x34, 0x7a, 0xe6, 0xb1, 0x6e, 0x90, 0x9e, 0x79, 0xbc, 0xc7, 0x73, + 0x2c, 0xf3, 0x03, 0x8a, 0x4a, 0x98, 0x07, 0x00, 0xb2, 0x8b, 0x82, 0xb4, 0xba, 0x54, 0x2e, 0xac, + 0xc9, 0xe0, 0x90, 0x6e, 0xc0, 0x98, 0x26, 0x65, 0xcb, 0xf7, 0x5d, 0x82, 0x6d, 0xdf, 0x09, 0x42, + 0xe6, 0x98, 0xd3, 0xb1, 0x1e, 0x08, 0xd2, 0xae, 0x27, 0xde, 0x52, 0xa9, 0x5f, 0x3b, 0x16, 0x87, + 0x73, 0xbf, 0x4e, 0xb9, 0x5f, 0x35, 0xeb, 0x1a, 0xee, 0x43, 0x86, 0x4b, 0x36, 0xdb, 0xe7, 0x05, + 0x28, 0x3d, 0xb1, 0x1d, 0x37, 0xc4, 0xae, 0xed, 0x76, 0x30, 0xda, 0x85, 0x09, 0x9a, 0xbb, 0x93, + 0x81, 
0x58, 0x2d, 0xf9, 0x27, 0x03, 0x71, 0xac, 0xe6, 0x6d, 0x2e, 0x50, 0xc6, 0x75, 0xf3, 0x3c, + 0x61, 0x3c, 0x90, 0xa4, 0x1b, 0xac, 0x5a, 0x6e, 0xdc, 0x44, 0x2f, 0x60, 0x92, 0xf7, 0xba, 0x13, + 0x84, 0x62, 0x45, 0xb5, 0xfa, 0x65, 0x3d, 0x50, 0xb7, 0x97, 0x55, 0x36, 0x01, 0xc5, 0x23, 0x7c, + 0x46, 0x00, 0xb2, 0x75, 0x93, 0xb4, 0x68, 0xaa, 0xe5, 0x53, 0x5f, 0xc8, 0x46, 0xd0, 0xe9, 0x54, + 0xe5, 0xd9, 0x8d, 0x70, 0x09, 0xdf, 0xef, 0xc0, 0xf8, 0x23, 0x3b, 0xd8, 0x43, 0x89, 0xdc, 0xab, + 0x3c, 0x4d, 0xad, 0xd7, 0x75, 0x20, 0xce, 0xe5, 0x2a, 0xe5, 0x72, 0x89, 0x85, 0x32, 0x95, 0x0b, + 0x7d, 0x7c, 0xc9, 0xf4, 0xc7, 0xde, 0xa5, 0x26, 0xf5, 0x17, 0x7b, 0xe4, 0x9a, 0xd4, 0x5f, 0xfc, + 0x29, 0x6b, 0xb6, 0xfe, 0x08, 0x97, 0xfd, 0x11, 0xe1, 0x33, 0x84, 0x29, 0xf1, 0x82, 0x13, 0x25, + 0xde, 0xbd, 0x24, 0x9e, 0x7d, 0xd6, 0xe7, 0xb3, 0xc0, 0x9c, 0xdb, 0x35, 0xca, 0xed, 0x8a, 0x59, + 0x4b, 0x59, 0x8b, 0x63, 0xde, 0x37, 0x6e, 0xde, 0x36, 0xd0, 0xf7, 0x01, 0x64, 0x77, 0x2b, 0xe5, + 0x83, 0xc9, 0x8e, 0x59, 0xca, 0x07, 0x53, 0x8d, 0x31, 0x73, 0x91, 0xf2, 0xbd, 0x61, 0x5e, 0x4b, + 0xf2, 0x0d, 0x7d, 0xdb, 0x0d, 0x5e, 0x60, 0xff, 0x16, 0x2b, 0xad, 0x07, 0x7b, 0xce, 0x90, 0x2c, + 0xd9, 0x87, 0x62, 0xd4, 0x7c, 0x48, 0xc6, 0xdb, 0x64, 0x9b, 0x24, 0x19, 0x6f, 0x53, 0x5d, 0x8b, + 0x78, 0xe0, 0x89, 0xed, 0x17, 0x81, 0x4a, 0x5c, 0xf0, 0x2f, 0xaa, 0x30, 0x4e, 0x8e, 0xe4, 0xe4, + 0x78, 0x22, 0xcb, 0x3d, 0xc9, 0xd5, 0xa7, 0x2a, 0xd6, 0xc9, 0xd5, 0xa7, 0x2b, 0x45, 0xf1, 0xe3, + 0x09, 0xb9, 0xae, 0x35, 0x58, 0x1d, 0x85, 0xac, 0xd4, 0x83, 0x92, 0x52, 0x06, 0x42, 0x1a, 0x62, + 0xf1, 0x0a, 0x78, 0x32, 0xe1, 0x69, 0x6a, 0x48, 0xe6, 0x6b, 0x94, 0xdf, 0x79, 0x96, 0xf0, 0x28, + 0xbf, 0x2e, 0xc3, 0x20, 0x0c, 0xf9, 0xea, 0xb8, 0xe7, 0x6b, 0x56, 0x17, 0xf7, 0xfe, 0x85, 0x6c, + 0x84, 0xcc, 0xd5, 0x49, 0xd7, 0x7f, 0x09, 0x65, 0xb5, 0xf4, 0x83, 0x34, 0xc2, 0x27, 0x6a, 0xf4, + 0xc9, 0x4c, 0xa2, 0xab, 0x1c, 0xc5, 0x63, 0x1b, 0x65, 0x69, 0x2b, 0x68, 0x84, 0x71, 0x1f, 0x0a, + 0xbc, 0x04, 0xa4, 0x53, 0x69, 0xbc, 0x8c, 0xaf, 0x53, 0x69, 0xa2, 0x7e, 0x14, 0x3f, 0x3f, 0x53, + 0x8e, 0xe4, 0x2a, 0x2a, 0xb2, 0x35, 0xe7, 0xf6, 0x10, 0x87, 0x59, 0xdc, 0x64, 0xd9, 0x36, 0x8b, + 0x9b, 0x52, 0x21, 0xc8, 0xe2, 0xd6, 0xc3, 0x21, 0x8f, 0x07, 0xe2, 0x7a, 0x8d, 0x32, 0x88, 0xa9, + 0x19, 0xd2, 0x3c, 0x0e, 0x45, 0x77, 0xbd, 0x91, 0x0c, 0x45, 0x7a, 0x3c, 0x04, 0x90, 0xe5, 0xa8, + 0xe4, 0x99, 0x55, 0xdb, 0x29, 0x48, 0x9e, 0x59, 0xf5, 0x15, 0xad, 0x78, 0x8c, 0x95, 0x7c, 0xd9, + 0xed, 0x8a, 0x70, 0xfe, 0xc2, 0x00, 0x94, 0x2e, 0x58, 0xa1, 0x77, 0xf4, 0xd4, 0xb5, 0x5d, 0x87, + 0xfa, 0xbb, 0xaf, 0x86, 0xac, 0x0b, 0xc8, 0x52, 0xa4, 0x0e, 0xc5, 0x1e, 0xbe, 0x24, 0x42, 0x7d, + 0x6e, 0xc0, 0x74, 0xac, 0xc8, 0x85, 0xde, 0xcc, 0xb0, 0x69, 0xa2, 0xf5, 0x50, 0x7f, 0xeb, 0x44, + 0x3c, 0xdd, 0x61, 0x5e, 0xd9, 0x01, 0xe2, 0x56, 0xf3, 0x3b, 0x06, 0x54, 0xe2, 0xb5, 0x30, 0x94, + 0x41, 0x3b, 0xd5, 0xb1, 0xa8, 0xdf, 0x38, 0x19, 0xf1, 0x78, 0xf3, 0xc8, 0x0b, 0x4d, 0x1f, 0x0a, + 0xbc, 0x68, 0xa6, 0xdb, 0xf8, 0xf1, 0x16, 0x87, 0x6e, 0xe3, 0x27, 0x2a, 0x6e, 0x9a, 0x8d, 0xef, + 0x7b, 0x7d, 0xac, 0xb8, 0x19, 0xaf, 0xa5, 0x65, 0x71, 0x3b, 0xde, 0xcd, 0x12, 0x85, 0xb8, 0x2c, + 0x6e, 0xd2, 0xcd, 0x44, 0xc9, 0x0c, 0x65, 0x10, 0x3b, 0xc1, 0xcd, 0x92, 0x15, 0x37, 0x8d, 0x9b, + 0x51, 0x86, 0x8a, 0x9b, 0xc9, 0x52, 0x96, 0xce, 0xcd, 0x52, 0xdd, 0x18, 0x9d, 0x9b, 0xa5, 0xab, + 0x61, 0x1a, 0x3b, 0x52, 0xbe, 0x31, 0x37, 0x3b, 0xa7, 0x29, 0x76, 0xa1, 0x77, 0x33, 0x94, 0xa8, + 0xed, 0xed, 0xd4, 0x6f, 0xbd, 0x22, 0x76, 0xe6, 0x1e, 0x67, 0xea, 0x17, 0x7b, 0xfc, 0x8f, 0x0c, + 0x98, 0xd3, 0xd5, 0xc7, 0x50, 
0x06, 0x9f, 0x8c, 0x56, 0x50, 0x7d, 0xf1, 0x55, 0xd1, 0x8f, 0xd7, + 0x56, 0xb4, 0xeb, 0x1f, 0xf4, 0xbe, 0x68, 0x36, 0x9e, 0x5f, 0x85, 0x2b, 0x30, 0xd9, 0x1c, 0x3a, + 0x8f, 0xf1, 0x11, 0x3a, 0x37, 0x95, 0xab, 0x4f, 0x13, 0xba, 0x9e, 0xef, 0x7c, 0x46, 0xff, 0x3c, + 0xc4, 0x42, 0x6e, 0xb7, 0x0c, 0x10, 0x21, 0x8c, 0xfd, 0xdb, 0x97, 0xf3, 0xc6, 0x7f, 0x7e, 0x39, + 0x6f, 0xfc, 0xcf, 0x97, 0xf3, 0xc6, 0x4f, 0xff, 0x6f, 0x7e, 0xec, 0xf9, 0xb5, 0x9e, 0x47, 0xc5, + 0x5a, 0x74, 0xbc, 0x86, 0xfc, 0x93, 0x15, 0xcb, 0x0d, 0x55, 0xd4, 0xdd, 0x49, 0xfa, 0x37, 0x26, + 0x96, 0x7f, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x52, 0x87, 0xa2, 0xad, 0x3a, 0x43, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -9461,6 +9528,11 @@ func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.HashRevision != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.HashRevision)) + i-- + dAtA[i] = 0x20 + } if m.CompactRevision != 0 { i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) i-- @@ -9581,6 +9653,13 @@ func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + } if len(m.Blob) > 0 { i -= len(m.Blob) copy(dAtA[i:], m.Blob) @@ -11481,6 +11560,18 @@ func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.DbSizeQuota != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeQuota)) + i-- + dAtA[i] = 0x60 + } + if len(m.StorageVersion) > 0 { + i -= len(m.StorageVersion) + copy(dAtA[i:], m.StorageVersion) + i = encodeVarintRpc(dAtA, i, uint64(len(m.StorageVersion))) + i-- + dAtA[i] = 0x5a + } if m.IsLearner { i-- if m.IsLearner { @@ -13459,6 +13550,9 @@ func (m *HashKVResponse) Size() (n int) { if m.CompactRevision != 0 { n += 1 + sovRpc(uint64(m.CompactRevision)) } + if m.HashRevision != 0 { + n += 1 + sovRpc(uint64(m.HashRevision)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -13513,6 +13607,10 @@ func (m *SnapshotResponse) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -14389,6 +14487,13 @@ func (m *StatusResponse) Size() (n int) { if m.IsLearner { n += 2 } + l = len(m.StorageVersion) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.DbSizeQuota != 0 { + n += 1 + sovRpc(uint64(m.DbSizeQuota)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -17568,6 +17673,25 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { break } } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HashRevision", wireType) + } + m.HashRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HashRevision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -17865,6 +17989,38 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { m.Blob = []byte{} } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + 
} + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -22459,6 +22615,57 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } } m.IsLearner = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbSizeQuota", wireType) + } + m.DbSizeQuota = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbSizeQuota |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto index 4ccc234744c2c..a4ace71ce2bdb 100644 --- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto @@ -4,13 +4,36 @@ package etcdserverpb; import "gogoproto/gogo.proto"; import "etcd/api/mvccpb/kv.proto"; import "etcd/api/authpb/auth.proto"; +import "etcd/api/versionpb/version.proto"; // for grpc-gateway import "google/api/annotations.proto"; +import "protoc-gen-openapiv2/options/annotations.proto"; + +option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; +option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { + security_definitions: { + security: { + key: "ApiKey"; + value: { + type: TYPE_API_KEY; + in: IN_HEADER; + name: "Authorization"; + } + } + } + security: { + security_requirement: { + key: "ApiKey"; + value: {}; + } + } +}; + service KV { // Range gets the keys in the range from the key-value store. rpc Range(RangeRequest) returns (RangeResponse) { @@ -388,13 +411,16 @@ service Auth { } message ResponseHeader { + option (versionpb.etcd_version_msg) = "3.0"; + // cluster_id is the ID of the cluster which sent the response. uint64 cluster_id = 1; // member_id is the ID of the member which sent the response. uint64 member_id = 2; - // revision is the key-value store revision when the request was applied. + // revision is the key-value store revision when the request was applied, and it's + // unset (so 0) in case of calls not interacting with key-value store. 
// For watch progress responses, the header.revision indicates progress. All future events - // recieved in this stream are guaranteed to have a higher revision number than the + // received in this stream are guaranteed to have a higher revision number than the // header.revision number. int64 revision = 3; // raft_term is the raft term when the request was applied. @@ -402,17 +428,21 @@ message ResponseHeader { } message RangeRequest { + option (versionpb.etcd_version_msg) = "3.0"; + enum SortOrder { - NONE = 0; // default, no sorting - ASCEND = 1; // lowest target value first - DESCEND = 2; // highest target value first + option (versionpb.etcd_version_enum) = "3.0"; + NONE = 0; // default, no sorting + ASCEND = 1; // lowest target value first + DESCEND = 2; // highest target value first } enum SortTarget { - KEY = 0; - VERSION = 1; - CREATE = 2; - MOD = 3; - VALUE = 4; + option (versionpb.etcd_version_enum) = "3.0"; + KEY = 0; + VERSION = 1; + CREATE = 2; + MOD = 3; + VALUE = 4; } // key is the first key for the range. If range_end is not given, the request only looks up key. @@ -453,22 +483,24 @@ message RangeRequest { // min_mod_revision is the lower bound for returned key mod revisions; all keys with // lesser mod revisions will be filtered away. - int64 min_mod_revision = 10; + int64 min_mod_revision = 10 [(versionpb.etcd_version_field)="3.1"]; // max_mod_revision is the upper bound for returned key mod revisions; all keys with // greater mod revisions will be filtered away. - int64 max_mod_revision = 11; + int64 max_mod_revision = 11 [(versionpb.etcd_version_field)="3.1"]; // min_create_revision is the lower bound for returned key create revisions; all keys with // lesser create revisions will be filtered away. - int64 min_create_revision = 12; + int64 min_create_revision = 12 [(versionpb.etcd_version_field)="3.1"]; // max_create_revision is the upper bound for returned key create revisions; all keys with // greater create revisions will be filtered away. - int64 max_create_revision = 13; + int64 max_create_revision = 13 [(versionpb.etcd_version_field)="3.1"]; } message RangeResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // kvs is the list of key-value pairs matched by the range request. // kvs is empty when count is requested. @@ -480,6 +512,8 @@ message RangeResponse { } message PutRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // key is the key, in bytes, to put into the key-value store. bytes key = 1; // value is the value, in bytes, to associate with the key in the key-value store. @@ -490,24 +524,28 @@ message PutRequest { // If prev_kv is set, etcd gets the previous key-value pair before changing it. // The previous key-value pair will be returned in the put response. - bool prev_kv = 4; + bool prev_kv = 4 [(versionpb.etcd_version_field)="3.1"]; // If ignore_value is set, etcd updates the key using its current value. // Returns an error if the key does not exist. - bool ignore_value = 5; + bool ignore_value = 5 [(versionpb.etcd_version_field)="3.2"]; // If ignore_lease is set, etcd updates the key using its current lease. // Returns an error if the key does not exist. - bool ignore_lease = 6; + bool ignore_lease = 6 [(versionpb.etcd_version_field)="3.2"]; } message PutResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // if prev_kv is set in the request, the previous key-value pair will be returned. 
- mvccpb.KeyValue prev_kv = 2; + mvccpb.KeyValue prev_kv = 2 [(versionpb.etcd_version_field)="3.1"]; } message DeleteRangeRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // key is the first key to delete in the range. bytes key = 1; // range_end is the key following the last key to delete for the range [key, range_end). @@ -519,50 +557,61 @@ message DeleteRangeRequest { // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. // The previous key-value pairs will be returned in the delete response. - bool prev_kv = 3; + bool prev_kv = 3 [(versionpb.etcd_version_field)="3.1"]; } message DeleteRangeResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // deleted is the number of keys deleted by the delete range request. int64 deleted = 2; // if prev_kv is set in the request, the previous key-value pairs will be returned. - repeated mvccpb.KeyValue prev_kvs = 3; + repeated mvccpb.KeyValue prev_kvs = 3 [(versionpb.etcd_version_field)="3.1"]; } message RequestOp { + option (versionpb.etcd_version_msg) = "3.0"; // request is a union of request types accepted by a transaction. oneof request { RangeRequest request_range = 1; PutRequest request_put = 2; DeleteRangeRequest request_delete_range = 3; - TxnRequest request_txn = 4; + TxnRequest request_txn = 4 [(versionpb.etcd_version_field)="3.3"]; } } message ResponseOp { + option (versionpb.etcd_version_msg) = "3.0"; + // response is a union of response types returned by a transaction. oneof response { RangeResponse response_range = 1; PutResponse response_put = 2; DeleteRangeResponse response_delete_range = 3; - TxnResponse response_txn = 4; + TxnResponse response_txn = 4 [(versionpb.etcd_version_field)="3.3"]; } } message Compare { + option (versionpb.etcd_version_msg) = "3.0"; + enum CompareResult { + option (versionpb.etcd_version_enum) = "3.0"; + EQUAL = 0; GREATER = 1; LESS = 2; - NOT_EQUAL = 3; + NOT_EQUAL = 3 [(versionpb.etcd_version_enum_value)="3.1"]; } enum CompareTarget { + option (versionpb.etcd_version_enum) = "3.0"; + VERSION = 0; CREATE = 1; MOD = 2; VALUE = 3; - LEASE = 4; + LEASE = 4 [(versionpb.etcd_version_enum_value)="3.3"]; } // result is logical comparison operation for this comparison. CompareResult result = 1; @@ -580,13 +629,13 @@ message Compare { // value is the value of the given key, in bytes. bytes value = 7; // lease is the lease id of the given key. - int64 lease = 8; + int64 lease = 8 [(versionpb.etcd_version_field)="3.3"]; // leave room for more target_union field tags, jump to 64 } // range_end compares the given target to all keys in the range [key, range_end). // See RangeRequest for more details on key ranges. - bytes range_end = 64; + bytes range_end = 64 [(versionpb.etcd_version_field)="3.3"]; // TODO: fill out with most of the rest of RangeRequest fields when needed. } @@ -606,6 +655,8 @@ message Compare { // true. // 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. message TxnRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // compare is a list of predicates representing a conjunction of terms. // If the comparisons succeed, then the success requests will be processed in order, // and the response will contain their respective responses in order. @@ -619,6 +670,8 @@ message TxnRequest { } message TxnResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // succeeded is set to true if the compare evaluated to true or false otherwise. 
bool succeeded = 2; @@ -630,6 +683,8 @@ // CompactionRequest compacts the key-value store up to a given revision. All superseded keys // with a revision less than the compaction revision will be removed. message CompactionRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // revision is the key-value store revision for the compaction operation. int64 revision = 1; // physical is set so the RPC will wait until the compaction is physically @@ -639,35 +694,48 @@ message CompactionRequest { } message CompactionResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message HashRequest { + option (versionpb.etcd_version_msg) = "3.0"; } message HashKVRequest { + option (versionpb.etcd_version_msg) = "3.3"; // revision is the key-value store revision for the hash operation. int64 revision = 1; } message HashKVResponse { + option (versionpb.etcd_version_msg) = "3.3"; + ResponseHeader header = 1; // hash is the hash value computed from the responding member's MVCC keys up to a given revision. uint32 hash = 2; // compact_revision is the compacted revision of key-value store when hash begins. int64 compact_revision = 3; + // hash_revision is the revision up to which the hash is calculated. + int64 hash_revision = 4 [(versionpb.etcd_version_field)="3.6"]; } message HashResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // hash is the hash value computed from the responding member's KV's backend. uint32 hash = 2; } message SnapshotRequest { + option (versionpb.etcd_version_msg) = "3.3"; } message SnapshotResponse { + option (versionpb.etcd_version_msg) = "3.3"; + // header has the current key-value store information. The first header in the snapshot // stream indicates the point in time of the snapshot. ResponseHeader header = 1; @@ -677,18 +745,26 @@ message SnapshotResponse { // blob contains the next chunk of the snapshot in the snapshot stream. bytes blob = 3; + + // version is the local version of the server that created the snapshot. + // In a cluster whose members run different binary versions, each member can return a different result. + // It informs which etcd server version should be used when restoring the snapshot. + string version = 4 [(versionpb.etcd_version_field)="3.6"]; } message WatchRequest { + option (versionpb.etcd_version_msg) = "3.0"; // request_union is a request to either create a new watcher or cancel an existing watcher. oneof request_union { WatchCreateRequest create_request = 1; WatchCancelRequest cancel_request = 2; - WatchProgressRequest progress_request = 3; + WatchProgressRequest progress_request = 3 [(versionpb.etcd_version_field)="3.4"]; } } message WatchCreateRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // key is the key to register for watching. bytes key = 1; @@ -709,6 +785,8 @@ message WatchCreateRequest { bool progress_notify = 4; enum FilterType { + option (versionpb.etcd_version_enum) = "3.1"; + // filter out put event. NOPUT = 0; // filter out delete event. @@ -716,34 +794,38 @@ } // filters filter the events on the server side before they are sent back to the watcher. - repeated FilterType filters = 5; + repeated FilterType filters = 5 [(versionpb.etcd_version_field)="3.1"]; // If prev_kv is set, created watcher gets the previous KV before the event happens. // If the previous KV is already compacted, nothing will be returned. 
- bool prev_kv = 6; + bool prev_kv = 6 [(versionpb.etcd_version_field)="3.1"]; // If watch_id is provided and non-zero, it will be assigned to this watcher. // Since creating a watcher in etcd is not a synchronous operation, // this can be used to ensure that ordering is correct when creating multiple // watchers on the same stream. Creating a watcher with an ID already in // use on the stream will cause an error to be returned. - int64 watch_id = 7; + int64 watch_id = 7 [(versionpb.etcd_version_field)="3.4"]; // fragment enables splitting large revisions into multiple watch responses. - bool fragment = 8; + bool fragment = 8 [(versionpb.etcd_version_field)="3.4"]; } message WatchCancelRequest { + option (versionpb.etcd_version_msg) = "3.1"; // watch_id is the watcher id to cancel so that no more events are transmitted. - int64 watch_id = 1; + int64 watch_id = 1 [(versionpb.etcd_version_field)="3.1"]; } // Requests that a watch stream progress status be sent in the watch response stream as soon as // possible. message WatchProgressRequest { + option (versionpb.etcd_version_msg) = "3.4"; } message WatchResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // watch_id is the ID of the watcher that corresponds to the response. int64 watch_id = 2; @@ -754,7 +836,8 @@ message WatchResponse { // All events sent to the created watcher will attach with the same watch_id. bool created = 3; - // canceled is set to true if the response is for a cancel watch request. + // canceled is set to true if the response is for a cancel watch request + // or if the start_revision has already been compacted. // No further events will be sent to the canceled watcher. bool canceled = 4; @@ -769,15 +852,17 @@ int64 compact_revision = 5; // cancel_reason indicates the reason for canceling the watcher. - string cancel_reason = 6; + string cancel_reason = 6 [(versionpb.etcd_version_field)="3.4"]; // fragment is true if a large watch response was split over multiple responses. - bool fragment = 7; + bool fragment = 7 [(versionpb.etcd_version_field)="3.4"]; repeated mvccpb.Event events = 11; } message LeaseGrantRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // TTL is the advisory time-to-live in seconds. Expired lease will return -1. int64 TTL = 1; // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. @@ -785,6 +870,8 @@ } message LeaseGrantResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // ID is the lease ID for the granted lease. int64 ID = 2; @@ -794,15 +881,21 @@ } message LeaseRevokeRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. int64 ID = 1; } message LeaseRevokeResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message LeaseCheckpoint { + option (versionpb.etcd_version_msg) = "3.4"; + // ID is the lease ID to checkpoint. int64 ID = 1; @@ -811,19 +904,26 @@ } message LeaseCheckpointRequest { + option (versionpb.etcd_version_msg) = "3.4"; + repeated LeaseCheckpoint checkpoints = 1; } message LeaseCheckpointResponse { + option (versionpb.etcd_version_msg) = "3.4"; + ResponseHeader header = 1; } message LeaseKeepAliveRequest { + option (versionpb.etcd_version_msg) = "3.0"; // ID is the lease ID for the lease to keep alive. 
int64 ID = 1; } message LeaseKeepAliveResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // ID is the lease ID from the keep alive request. int64 ID = 2; @@ -832,6 +932,7 @@ message LeaseKeepAliveResponse { } message LeaseTimeToLiveRequest { + option (versionpb.etcd_version_msg) = "3.1"; // ID is the lease ID for the lease. int64 ID = 1; // keys is true to query all the keys attached to this lease. @@ -839,6 +940,8 @@ message LeaseTimeToLiveRequest { } message LeaseTimeToLiveResponse { + option (versionpb.etcd_version_msg) = "3.1"; + ResponseHeader header = 1; // ID is the lease ID from the keep alive request. int64 ID = 2; @@ -851,19 +954,26 @@ message LeaseTimeToLiveResponse { } message LeaseLeasesRequest { + option (versionpb.etcd_version_msg) = "3.3"; } message LeaseStatus { + option (versionpb.etcd_version_msg) = "3.3"; + int64 ID = 1; // TODO: int64 TTL = 2; } message LeaseLeasesResponse { + option (versionpb.etcd_version_msg) = "3.3"; + ResponseHeader header = 1; repeated LeaseStatus leases = 2; } message Member { + option (versionpb.etcd_version_msg) = "3.0"; + // ID is the member ID for this member. uint64 ID = 1; // name is the human-readable name of the member. If the member is not started, the name will be an empty string. @@ -873,17 +983,21 @@ message Member { // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. repeated string clientURLs = 4; // isLearner indicates if the member is raft learner. - bool isLearner = 5; + bool isLearner = 5 [(versionpb.etcd_version_field)="3.4"]; } message MemberAddRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // peerURLs is the list of URLs the added member will use to communicate with the cluster. repeated string peerURLs = 1; // isLearner indicates if the added member is raft learner. - bool isLearner = 2; + bool isLearner = 2 [(versionpb.etcd_version_field)="3.4"]; } message MemberAddResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // member is the member information for the added member. Member member = 2; @@ -892,17 +1006,22 @@ message MemberAddResponse { } message MemberRemoveRequest { + option (versionpb.etcd_version_msg) = "3.0"; // ID is the member ID of the member to remove. uint64 ID = 1; } message MemberRemoveResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // members is a list of all members after removing the member. repeated Member members = 2; } message MemberUpdateRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // ID is the member ID of the member to update. uint64 ID = 1; // peerURLs is the new list of URLs the member will use to communicate with the cluster. @@ -910,59 +1029,80 @@ message MemberUpdateRequest { } message MemberUpdateResponse{ + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // members is a list of all members after updating the member. - repeated Member members = 2; + repeated Member members = 2 [(versionpb.etcd_version_field)="3.1"]; } message MemberListRequest { - bool linearizable = 1; + option (versionpb.etcd_version_msg) = "3.0"; + + bool linearizable = 1 [(versionpb.etcd_version_field)="3.5"]; } message MemberListResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // members is a list of all members associated with the cluster. 
repeated Member members = 2; } message MemberPromoteRequest { + option (versionpb.etcd_version_msg) = "3.4"; // ID is the member ID of the member to promote. uint64 ID = 1; } message MemberPromoteResponse { + option (versionpb.etcd_version_msg) = "3.4"; + ResponseHeader header = 1; // members is a list of all members after promoting the member. repeated Member members = 2; } message DefragmentRequest { + option (versionpb.etcd_version_msg) = "3.0"; } message DefragmentResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message MoveLeaderRequest { + option (versionpb.etcd_version_msg) = "3.3"; // targetID is the node ID for the new leader. uint64 targetID = 1; } message MoveLeaderResponse { + option (versionpb.etcd_version_msg) = "3.3"; + ResponseHeader header = 1; } enum AlarmType { + option (versionpb.etcd_version_enum) = "3.0"; + NONE = 0; // default, used to query if any alarm is active NOSPACE = 1; // space quota is exhausted - CORRUPT = 2; // kv store corruption detected + CORRUPT = 2 [(versionpb.etcd_version_enum_value)="3.3"]; // kv store corruption detected } message AlarmRequest { + option (versionpb.etcd_version_msg) = "3.0"; + enum AlarmAction { - GET = 0; - ACTIVATE = 1; - DEACTIVATE = 2; + option (versionpb.etcd_version_enum) = "3.0"; + + GET = 0; + ACTIVATE = 1; + DEACTIVATE = 2; } // action is the kind of alarm request to issue. The action // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a @@ -976,6 +1116,7 @@ message AlarmRequest { } message AlarmMember { + option (versionpb.etcd_version_msg) = "3.0"; // memberID is the ID of the member associated with the raised alarm. uint64 memberID = 1; // alarm is the type of alarm which has been raised. @@ -983,13 +1124,19 @@ message AlarmMember { } message AlarmResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // alarms is a list of alarms associated with the alarm request. repeated AlarmMember alarms = 2; } message DowngradeRequest { + option (versionpb.etcd_version_msg) = "3.5"; + enum DowngradeAction { + option (versionpb.etcd_version_enum) = "3.5"; + VALIDATE = 0; ENABLE = 1; CANCEL = 2; @@ -1004,15 +1151,20 @@ message DowngradeRequest { } message DowngradeResponse { + option (versionpb.etcd_version_msg) = "3.5"; + ResponseHeader header = 1; // version is the current cluster version. string version = 2; } message StatusRequest { + option (versionpb.etcd_version_msg) = "3.0"; } message StatusResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // version is the cluster protocol version used by the responding member. string version = 2; @@ -1025,55 +1177,73 @@ message StatusResponse { // raftTerm is the current raft term of the responding member. uint64 raftTerm = 6; // raftAppliedIndex is the current raft applied index of the responding member. - uint64 raftAppliedIndex = 7; + uint64 raftAppliedIndex = 7 [(versionpb.etcd_version_field)="3.4"]; // errors contains alarm/health information and status. - repeated string errors = 8; + repeated string errors = 8 [(versionpb.etcd_version_field)="3.4"]; // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. - int64 dbSizeInUse = 9; + int64 dbSizeInUse = 9 [(versionpb.etcd_version_field)="3.4"]; // isLearner indicates if the member is raft learner. - bool isLearner = 10; + bool isLearner = 10 [(versionpb.etcd_version_field)="3.4"]; + // storageVersion is the version of the db file. 
It might be updated with a delay relative to the target cluster version. + string storageVersion = 11 [(versionpb.etcd_version_field)="3.6"]; + // dbSizeQuota is the configured etcd storage quota in bytes (the value passed to the etcd instance via the --quota-backend-bytes flag) + int64 dbSizeQuota = 12 [(versionpb.etcd_version_field)="3.6"]; } message AuthEnableRequest { + option (versionpb.etcd_version_msg) = "3.0"; } message AuthDisableRequest { + option (versionpb.etcd_version_msg) = "3.0"; } message AuthStatusRequest { + option (versionpb.etcd_version_msg) = "3.5"; } message AuthenticateRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string name = 1; string password = 2; } message AuthUserAddRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string name = 1; string password = 2; - authpb.UserAddOptions options = 3; - string hashedPassword = 4; + authpb.UserAddOptions options = 3 [(versionpb.etcd_version_field)="3.4"]; + string hashedPassword = 4 [(versionpb.etcd_version_field)="3.5"]; } message AuthUserGetRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string name = 1; } message AuthUserDeleteRequest { + option (versionpb.etcd_version_msg) = "3.0"; // name is the name of the user to delete. string name = 1; } message AuthUserChangePasswordRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // name is the name of the user whose password is being changed. string name = 1; // password is the new password for the user. Note that this field will be removed in the API layer. string password = 2; // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer. - string hashedPassword = 3; + string hashedPassword = 3 [(versionpb.etcd_version_field)="3.5"]; } message AuthUserGrantRoleRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // user is the name of the user who should be granted a given role. string user = 1; // role is the name of the role to grant to the user. @@ -1081,30 +1251,42 @@ } message AuthUserRevokeRoleRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string name = 1; string role = 2; } message AuthRoleAddRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // name is the name of the role to add to the authentication system. string name = 1; } message AuthRoleGetRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string role = 1; } message AuthUserListRequest { + option (versionpb.etcd_version_msg) = "3.0"; } message AuthRoleListRequest { + option (versionpb.etcd_version_msg) = "3.0"; } message AuthRoleDeleteRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string role = 1; } message AuthRoleGrantPermissionRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // name is the name of the role which will be granted the permission. string name = 1; // perm is the permission to grant to the role. 
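
A few hunks above, StatusResponse gained storageVersion and dbSizeQuota, both gated on "3.6". A minimal sketch of how a client could surface them, assuming an etcd v3.6+ server reachable at localhost:2379 (a placeholder endpoint) and the clientv3 package that wraps these generated types; on older servers the new fields simply decode as zero values:

package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Status returns the generated StatusResponse shown in this diff.
	st, err := cli.Status(ctx, "localhost:2379")
	if err != nil {
		panic(err)
	}
	fmt.Printf("db size %d of quota %d, storage version %q\n",
		st.DbSize, st.DbSizeQuota, st.StorageVersion)
}
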
@@ -1112,20 +1294,28 @@ message AuthRoleGrantPermissionRequest { } message AuthRoleRevokePermissionRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string role = 1; bytes key = 2; bytes range_end = 3; } message AuthEnableResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthDisableResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthStatusResponse { + option (versionpb.etcd_version_msg) = "3.5"; + ResponseHeader header = 1; bool enabled = 2; // authRevision is the current revision of auth store @@ -1133,67 +1323,93 @@ message AuthStatusResponse { } message AuthenticateResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // token is an authorized token that can be used in succeeding RPCs string token = 2; } message AuthUserAddResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthUserGetResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; repeated string roles = 2; } message AuthUserDeleteResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthUserChangePasswordResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthUserGrantRoleResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthUserRevokeRoleResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthRoleAddResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthRoleGetResponse { - ResponseHeader header = 1; + ResponseHeader header = 1 [(versionpb.etcd_version_field)="3.0"]; - repeated authpb.Permission perm = 2; + repeated authpb.Permission perm = 2 [(versionpb.etcd_version_field)="3.0"]; } message AuthRoleListResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; repeated string roles = 2; } message AuthUserListResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; repeated string users = 2; } message AuthRoleDeleteResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthRoleGrantPermissionResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthRoleRevokePermissionResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } diff --git a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go index cf0d428180685..85a2a741f31e7 100644 --- a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go +++ b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go @@ -11,6 +11,7 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/golang/protobuf/proto" + _ "go.etcd.io/etcd/api/v3/versionpb" ) // Reference imports to suppress errors if they are not otherwise used. 
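
The hunk that follows replaces the embedded fileDescriptor_... blob, a gzip-compressed FileDescriptorProto that had to be regenerated because membership.proto now imports versionpb. For readers who want to confirm what such a regenerated descriptor declares, here is a minimal sketch of decoding one, assuming the compressed bytes are already in hand; describe is an illustrative helper, not part of the vendored code:

package descriptors

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// describe gunzips an embedded descriptor blob and lists the file's
// imports and message types.
func describe(gz []byte) error {
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return err
	}
	defer zr.Close()
	raw, err := io.ReadAll(zr)
	if err != nil {
		return err
	}
	fd := &descriptorpb.FileDescriptorProto{}
	if err := proto.Unmarshal(raw, fd); err != nil {
		return err
	}
	fmt.Println("file:", fd.GetName(), "imports:", fd.GetDependency())
	for _, m := range fd.GetMessageType() {
		fmt.Println("message:", m.GetName())
	}
	return nil
}
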
@@ -286,30 +287,34 @@ func init() { func init() { proto.RegisterFile("membership.proto", fileDescriptor_949fe0d019050ef5) } var fileDescriptor_949fe0d019050ef5 = []byte{ - // 367 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x4e, 0xf2, 0x40, - 0x14, 0x85, 0x99, 0x42, 0xf8, 0xdb, 0xcb, 0x1f, 0xc4, 0x09, 0x89, 0x8d, 0x68, 0x25, 0x5d, 0xb1, - 0x30, 0x98, 0xe8, 0x13, 0xa0, 0xb0, 0x20, 0x81, 0xcd, 0x18, 0xdd, 0x92, 0x56, 0x2e, 0xd8, 0xa4, - 0x74, 0xea, 0xcc, 0x54, 0xd7, 0xbe, 0x85, 0x4f, 0xe0, 0xb3, 0xb0, 0xf4, 0x11, 0x14, 0x5f, 0xc4, - 0x74, 0x5a, 0x4a, 0x49, 0xdc, 0xb8, 0xbb, 0x3d, 0xbd, 0xf7, 0x9c, 0xf3, 0x35, 0x85, 0xd6, 0x0a, - 0x57, 0x3e, 0x0a, 0xf9, 0x18, 0xc4, 0xfd, 0x58, 0x70, 0xc5, 0xe9, 0xff, 0x9d, 0x12, 0xfb, 0xc7, - 0xed, 0x25, 0x5f, 0x72, 0xfd, 0xe2, 0x22, 0x9d, 0xb2, 0x1d, 0x77, 0x02, 0x4d, 0xe6, 0x2d, 0xd4, - 0x40, 0x29, 0x11, 0xf8, 0x89, 0x42, 0x49, 0x3b, 0x60, 0xc5, 0x88, 0x62, 0x96, 0x88, 0x50, 0xda, - 0xa4, 0x5b, 0xed, 0x59, 0xcc, 0x4c, 0x85, 0x3b, 0x11, 0x4a, 0x7a, 0x0a, 0x10, 0xc8, 0x59, 0x88, - 0x9e, 0x88, 0x50, 0xd8, 0x46, 0x97, 0xf4, 0x4c, 0x66, 0x05, 0x72, 0x92, 0x09, 0xee, 0x00, 0xa0, - 0xe4, 0x44, 0xa1, 0x16, 0x79, 0x2b, 0xb4, 0x49, 0x97, 0xf4, 0x2c, 0xa6, 0x67, 0x7a, 0x06, 0x8d, - 0x87, 0x30, 0xc0, 0x48, 0x65, 0xfe, 0x86, 0xf6, 0x87, 0x4c, 0x4a, 0x13, 0xdc, 0x77, 0x02, 0xf5, - 0xa9, 0xee, 0x4d, 0x9b, 0x60, 0x8c, 0x87, 0xfa, 0xba, 0xc6, 0x8c, 0xf1, 0x90, 0x8e, 0xe0, 0x40, - 0x78, 0x0b, 0x35, 0xf3, 0x8a, 0x08, 0xdd, 0xa0, 0x71, 0x79, 0xd2, 0x2f, 0x93, 0xf6, 0xf7, 0x81, - 0x58, 0x53, 0xec, 0x03, 0x8e, 0xe0, 0x30, 0x5b, 0x2f, 0x1b, 0x55, 0xb5, 0x91, 0xbd, 0x6f, 0x54, - 0x32, 0xc9, 0xbf, 0xee, 0x4e, 0x71, 0xcf, 0xc1, 0xbe, 0x09, 0x13, 0xa9, 0x50, 0xdc, 0xa3, 0x90, - 0x01, 0x8f, 0x6e, 0x51, 0x31, 0x7c, 0x4a, 0x50, 0x2a, 0xda, 0x82, 0xea, 0x33, 0x8a, 0x1c, 0x3c, - 0x1d, 0xdd, 0x57, 0x02, 0x9d, 0x7c, 0x7d, 0x5a, 0x38, 0x95, 0x2e, 0x3a, 0x60, 0xe5, 0xa5, 0x0a, - 0x64, 0x33, 0x13, 0x34, 0xf8, 0x2f, 0x8d, 0x8d, 0x3f, 0x37, 0x1e, 0xc1, 0xd1, 0x90, 0xbf, 0x44, - 0x4b, 0xe1, 0xcd, 0x71, 0x1c, 0x2d, 0x78, 0x29, 0xde, 0x86, 0x7f, 0x18, 0x79, 0x7e, 0x88, 0x73, - 0x1d, 0x6e, 0xb2, 0xed, 0xe3, 0x16, 0xc5, 0x28, 0x50, 0xae, 0xdb, 0xeb, 0x2f, 0xa7, 0xb2, 0xde, - 0x38, 0xe4, 0x63, 0xe3, 0x90, 0xcf, 0x8d, 0x43, 0xde, 0xbe, 0x9d, 0x8a, 0x5f, 0xd7, 0xff, 0xd3, - 0xd5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x93, 0x7d, 0x0b, 0x87, 0x02, 0x00, 0x00, + // 422 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xed, 0x3a, 0x55, 0x6b, 0x4f, 0x51, 0x28, 0x2b, 0x24, 0xac, 0x06, 0x8c, 0x55, 0x2e, 0x39, + 0xd9, 0x12, 0x51, 0x0f, 0x70, 0x03, 0xd2, 0x43, 0x10, 0xe5, 0xb0, 0xa8, 0x1c, 0xb8, 0x44, 0xeb, + 0x66, 0x62, 0x56, 0x72, 0xbc, 0x66, 0x77, 0x5d, 0xee, 0x1c, 0xf9, 0x02, 0xfe, 0x82, 0x13, 0xff, + 0xd0, 0x23, 0x9f, 0x00, 0xe1, 0x47, 0x90, 0x77, 0x9d, 0xd8, 0x11, 0x9c, 0x7a, 0x1b, 0x3f, 0xcf, + 0xbc, 0x79, 0xef, 0xed, 0xc0, 0xf1, 0x0a, 0x57, 0x19, 0x2a, 0xfd, 0x51, 0x54, 0x49, 0xa5, 0xa4, + 0x91, 0xf4, 0x4e, 0x87, 0x54, 0xd9, 0xc9, 0xfd, 0x5c, 0xe6, 0xd2, 0xfe, 0x48, 0x9b, 0xca, 0xf5, + 0x9c, 0xc4, 0x68, 0xae, 0x16, 0x29, 0xaf, 0x44, 0x7a, 0x8d, 0x4a, 0x0b, 0x59, 0x56, 0xd9, 0xa6, + 0x72, 0x1d, 0xa7, 0x97, 0x30, 0x64, 0x7c, 0x69, 0x5e, 0x18, 0xa3, 0x44, 0x56, 0x1b, 0xd4, 0x74, + 0x04, 0x41, 0x85, 0xa8, 0xe6, 0xb5, 0x2a, 0x74, 0x48, 0xe2, 0xc1, 0x38, 0x60, 0x7e, 0x03, 0x5c, + 0xaa, 0x42, 0xd3, 0x47, 0x00, 0x42, 0xcf, 0x0b, 0xe4, 
0xaa, 0x44, 0x15, 0x7a, 0x31, 0x19, 0xfb, + 0x2c, 0x10, 0xfa, 0x8d, 0x03, 0x9e, 0x1f, 0x7e, 0xf9, 0x11, 0x0e, 0x26, 0xc9, 0xd9, 0xe9, 0x6b, + 0x80, 0x1e, 0x25, 0x85, 0xfd, 0x92, 0xaf, 0x30, 0x24, 0x31, 0x19, 0x07, 0xcc, 0xd6, 0xf4, 0x31, + 0x1c, 0x5d, 0x15, 0x02, 0x4b, 0xe3, 0x16, 0x79, 0x76, 0x11, 0x38, 0xa8, 0x59, 0xd5, 0x71, 0x7d, + 0x27, 0x70, 0x70, 0x61, 0xbd, 0xd2, 0x21, 0x78, 0xb3, 0xa9, 0xa5, 0xd9, 0x67, 0xde, 0x6c, 0x4a, + 0xcf, 0xe1, 0xae, 0xe2, 0x4b, 0x33, 0xe7, 0xdb, 0x5d, 0x56, 0xd3, 0xd1, 0xd3, 0x87, 0x49, 0x3f, + 0x9d, 0x64, 0xd7, 0x22, 0x1b, 0xaa, 0x5d, 0xcb, 0xe7, 0x70, 0xcf, 0xb5, 0xf7, 0x89, 0x06, 0x96, + 0x28, 0xdc, 0x25, 0xea, 0x91, 0xb4, 0x2f, 0xd2, 0x21, 0x9d, 0xe2, 0x33, 0x08, 0x5f, 0x15, 0xb5, + 0x36, 0xa8, 0xde, 0xbb, 0xb0, 0xdf, 0xa1, 0x61, 0xf8, 0xa9, 0x46, 0x6d, 0xe8, 0x31, 0x0c, 0xae, + 0x51, 0xb5, 0x51, 0x34, 0x65, 0x37, 0xf6, 0x95, 0xc0, 0xa8, 0x9d, 0xbb, 0xd8, 0x72, 0xf7, 0x46, + 0x47, 0x10, 0xb4, 0x32, 0xb7, 0x21, 0xf8, 0x0e, 0xb0, 0x51, 0xfc, 0xc7, 0x83, 0x77, 0x7b, 0x0f, + 0x6f, 0xe1, 0xc1, 0x54, 0x7e, 0x2e, 0x73, 0xc5, 0x17, 0x38, 0x2b, 0x97, 0xb2, 0xa7, 0x23, 0x84, + 0x43, 0x2c, 0x79, 0x56, 0xe0, 0xc2, 0xaa, 0xf0, 0xd9, 0xe6, 0x73, 0x63, 0xce, 0xfb, 0xd7, 0xdc, + 0xcb, 0x67, 0x37, 0xbf, 0xa3, 0xbd, 0x9b, 0x75, 0x44, 0x7e, 0xae, 0x23, 0xf2, 0x6b, 0x1d, 0x91, + 0x6f, 0x7f, 0xa2, 0xbd, 0x0f, 0x4f, 0x72, 0x99, 0x34, 0x37, 0x9a, 0x08, 0x99, 0x76, 0xb7, 0x3a, + 0x49, 0xfb, 0x82, 0xb3, 0x03, 0x7b, 0xaa, 0x93, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x56, + 0x21, 0x97, 0x04, 0x03, 0x00, 0x00, } func (m *RaftAttributes) Marshal() (dAtA []byte, err error) { diff --git a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto index e63e9ecc994e1..cbccfefccf451 100644 --- a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto +++ b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto @@ -2,6 +2,9 @@ syntax = "proto3"; package membershippb; import "gogoproto/gogo.proto"; +import "etcd/api/versionpb/version.proto"; + +option go_package = "go.etcd.io/etcd/api/v3/membershippb"; option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; @@ -10,6 +13,8 @@ option (gogoproto.goproto_getters_all) = false; // RaftAttributes represents the raft related attributes of an etcd member. message RaftAttributes { + option (versionpb.etcd_version_msg) = "3.5"; + // peerURLs is the list of peers in the raft cluster. repeated string peer_urls = 1; // isLearner indicates if the member is raft learner. @@ -18,26 +23,36 @@ message RaftAttributes { // Attributes represents all the non-raft related attributes of an etcd member. 
message Attributes { + option (versionpb.etcd_version_msg) = "3.5"; + string name = 1; repeated string client_urls = 2; } message Member { + option (versionpb.etcd_version_msg) = "3.5"; + uint64 ID = 1; RaftAttributes raft_attributes = 2; Attributes member_attributes = 3; } message ClusterVersionSetRequest { + option (versionpb.etcd_version_msg) = "3.5"; + string ver = 1; } message ClusterMemberAttrSetRequest { + option (versionpb.etcd_version_msg) = "3.5"; + uint64 member_ID = 1; Attributes member_attributes = 2; } message DowngradeInfoSetRequest { + option (versionpb.etcd_version_msg) = "3.5"; + bool enabled = 1; string ver = 2; } \ No newline at end of file diff --git a/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go index fc258d6c206da..2fed4242ccc61 100644 --- a/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go +++ b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go @@ -164,26 +164,28 @@ func init() { func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) } var fileDescriptor_2216fe83c9c12408 = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, - 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, - 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, - 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, - 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, - 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, - 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, - 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, - 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, - 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, - 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, - 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, - 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, - 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, - 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, - 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, - 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, - 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, - 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, + // 327 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xc1, 0x6a, 0xfa, 0x40, + 0x10, 0xc6, 0xb3, 0x46, 0xa3, 0xff, 0x51, 0xfc, 0x87, 0x45, 0x68, 0x28, 0x34, 0xa4, 0x5e, 0x6a, + 0x29, 0x24, 0xa0, 0x87, 0xde, 0x4b, 0x73, 0xb2, 0x87, 0x12, 0x6c, 0x0f, 0xbd, 0x48, 0x8c, 0x83, + 0x84, 0xa8, 0x1b, 0x62, 0xba, 0x90, 0x37, 0xe9, 0xbd, 0xf7, 0x3e, 0x87, 0x47, 0x1f, 0xa1, 0xda, + 0x17, 0x29, 0x3b, 0x5b, 0xed, 0xa5, 0x97, 0xdd, 0x99, 0xef, 0xfb, 0xb1, 0xf3, 0x0d, 0x0b, 0xad, + 0x4c, 0xfa, 0x79, 0x21, 
0x4a, 0xc1, 0xad, 0x95, 0x4c, 0x92, 0x7c, 0x76, 0xde, 0x5b, 0x88, 0x85, + 0x20, 0x29, 0x50, 0x95, 0x76, 0xfb, 0x1f, 0x0c, 0x5a, 0x63, 0xac, 0x9e, 0xe3, 0xe5, 0x2b, 0x72, + 0x1b, 0xcc, 0x0c, 0x2b, 0x87, 0x79, 0x6c, 0xd0, 0x89, 0x54, 0xc9, 0xaf, 0xe0, 0x7f, 0x52, 0x60, + 0x5c, 0xe2, 0xb4, 0x40, 0x99, 0x6e, 0x52, 0xb1, 0x76, 0x6a, 0x1e, 0x1b, 0x98, 0x51, 0x57, 0xcb, + 0xd1, 0x8f, 0xca, 0x2f, 0xa1, 0xb3, 0x12, 0xf3, 0x5f, 0xca, 0x24, 0xaa, 0xbd, 0x12, 0xf3, 0x13, + 0xe2, 0x40, 0x53, 0x62, 0x41, 0x6e, 0x9d, 0xdc, 0x63, 0xcb, 0x7b, 0xd0, 0x90, 0x2a, 0x80, 0xd3, + 0xa0, 0xc9, 0xba, 0x51, 0xea, 0x12, 0xe3, 0x0d, 0x3a, 0x16, 0xd1, 0xba, 0xe9, 0xbf, 0x33, 0x68, + 0x84, 0x12, 0xd7, 0x25, 0xbf, 0x81, 0x7a, 0x59, 0xe5, 0x48, 0x71, 0xbb, 0xc3, 0x33, 0x5f, 0xef, + 0xe9, 0x93, 0xa9, 0xcf, 0x49, 0x95, 0x63, 0x44, 0x10, 0xf7, 0xa0, 0x96, 0x49, 0xca, 0xde, 0x1e, + 0xda, 0x47, 0xf4, 0xb8, 0x78, 0x54, 0xcb, 0x24, 0xbf, 0x86, 0x66, 0x5e, 0xa0, 0x9c, 0x66, 0x92, + 0xc2, 0xff, 0x85, 0x59, 0x0a, 0x18, 0xcb, 0xbe, 0x07, 0xff, 0x4e, 0xef, 0xf3, 0x26, 0x98, 0x8f, + 0x4f, 0x13, 0xdb, 0xe0, 0x00, 0xd6, 0x7d, 0xf8, 0x10, 0x4e, 0x42, 0x9b, 0xdd, 0xdd, 0x6e, 0xf7, + 0xae, 0xb1, 0xdb, 0xbb, 0xc6, 0xf6, 0xe0, 0xb2, 0xdd, 0xc1, 0x65, 0x9f, 0x07, 0x97, 0xbd, 0x7d, + 0xb9, 0xc6, 0xcb, 0xc5, 0x42, 0xf8, 0x58, 0x26, 0x73, 0x3f, 0x15, 0x81, 0xba, 0x83, 0x38, 0x4f, + 0x03, 0x39, 0x0a, 0xf4, 0xac, 0x99, 0x45, 0xdf, 0x32, 0xfa, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x78, + 0x06, 0x46, 0xf5, 0xc0, 0x01, 0x00, 0x00, } func (m *KeyValue) Marshal() (dAtA []byte, err error) { diff --git a/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto index 23c911b7da881..a93479c69f74c 100644 --- a/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto +++ b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto @@ -3,6 +3,8 @@ package mvccpb; import "gogoproto/gogo.proto"; +option go_package = "go.etcd.io/etcd/api/v3/mvccpb"; + option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go index 23201302e833c..28f8d7e435fe9 100644 --- a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go +++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go @@ -21,75 +21,81 @@ import ( // server-side error var ( - ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() - ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() - ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() - ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() - ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() - ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() - ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err() - ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() - ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() + ErrGRPCEmptyKey = status.Error(codes.InvalidArgument, "etcdserver: key is not provided") + ErrGRPCKeyNotFound = status.Error(codes.InvalidArgument, "etcdserver: key not found") + ErrGRPCValueProvided = 
status.Error(codes.InvalidArgument, "etcdserver: value is provided") + ErrGRPCLeaseProvided = status.Error(codes.InvalidArgument, "etcdserver: lease is provided") + ErrGRPCTooManyOps = status.Error(codes.InvalidArgument, "etcdserver: too many operations in txn request") + ErrGRPCDuplicateKey = status.Error(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") + ErrGRPCInvalidClientAPIVersion = status.Error(codes.InvalidArgument, "etcdserver: invalid client api version") + ErrGRPCInvalidSortOption = status.Error(codes.InvalidArgument, "etcdserver: invalid sort option") + ErrGRPCCompacted = status.Error(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") + ErrGRPCFutureRev = status.Error(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") + ErrGRPCNoSpace = status.Error(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") - ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() - ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() - ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err() + ErrGRPCLeaseNotFound = status.Error(codes.NotFound, "etcdserver: requested lease not found") + ErrGRPCLeaseExist = status.Error(codes.FailedPrecondition, "etcdserver: lease already exists") + ErrGRPCLeaseTTLTooLarge = status.Error(codes.OutOfRange, "etcdserver: too large lease TTL") - ErrGRPCWatchCanceled = status.New(codes.Canceled, "etcdserver: watch canceled").Err() + ErrGRPCWatchCanceled = status.Error(codes.Canceled, "etcdserver: watch canceled") - ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() - ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() - ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() - ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() - ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() - ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err() - ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err() - ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err() - ErrGRPCClusterIdMismatch = status.New(codes.FailedPrecondition, "etcdserver: cluster ID mismatch").Err() + ErrGRPCMemberExist = status.Error(codes.FailedPrecondition, "etcdserver: member ID already exist") + ErrGRPCPeerURLExist = status.Error(codes.FailedPrecondition, "etcdserver: Peer URLs already exists") + ErrGRPCMemberNotEnoughStarted = status.Error(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members") + ErrGRPCMemberBadURLs = status.Error(codes.InvalidArgument, "etcdserver: given member URLs are invalid") + ErrGRPCMemberNotFound = status.Error(codes.NotFound, "etcdserver: member not found") + ErrGRPCMemberNotLearner = status.Error(codes.FailedPrecondition, "etcdserver: can only promote a learner member") + ErrGRPCLearnerNotReady = status.Error(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader") + 
ErrGRPCTooManyLearners = status.Error(codes.FailedPrecondition, "etcdserver: too many learner members in cluster")
+	ErrGRPCClusterIDMismatch = status.Error(codes.FailedPrecondition, "etcdserver: cluster ID mismatch")
+	//revive:disable:var-naming
+	// Deprecated: Please use ErrGRPCClusterIDMismatch.
+	ErrGRPCClusterIdMismatch = ErrGRPCClusterIDMismatch
+	//revive:enable:var-naming

-	ErrGRPCRequestTooLarge        = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
-	ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
+	ErrGRPCRequestTooLarge        = status.Error(codes.InvalidArgument, "etcdserver: request is too large")
+	ErrGRPCRequestTooManyRequests = status.Error(codes.ResourceExhausted, "etcdserver: too many requests")

-	ErrGRPCRootUserNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
-	ErrGRPCRootRoleNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
-	ErrGRPCUserAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
-	ErrGRPCUserEmpty            = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
-	ErrGRPCUserNotFound         = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
-	ErrGRPCRoleAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
-	ErrGRPCRoleNotFound         = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
-	ErrGRPCRoleEmpty            = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err()
-	ErrGRPCAuthFailed           = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
-	ErrGRPCPermissionNotGiven   = status.New(codes.InvalidArgument, "etcdserver: permission not given").Err()
-	ErrGRPCPermissionDenied     = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
-	ErrGRPCRoleNotGranted       = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
-	ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
-	ErrGRPCAuthNotEnabled       = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
-	ErrGRPCInvalidAuthToken     = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
-	ErrGRPCInvalidAuthMgmt      = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
-	ErrGRPCAuthOldRevision      = status.New(codes.InvalidArgument, "etcdserver: revision of auth store is old").Err()
+	ErrGRPCRootUserNotExist     = status.Error(codes.FailedPrecondition, "etcdserver: root user does not exist")
+	ErrGRPCRootRoleNotExist     = status.Error(codes.FailedPrecondition, "etcdserver: root user does not have root role")
+	ErrGRPCUserAlreadyExist     = status.Error(codes.FailedPrecondition, "etcdserver: user name already exists")
+	ErrGRPCUserEmpty            = status.Error(codes.InvalidArgument, "etcdserver: user name is empty")
+	ErrGRPCUserNotFound         = status.Error(codes.FailedPrecondition, "etcdserver: user name not found")
+	ErrGRPCRoleAlreadyExist     = status.Error(codes.FailedPrecondition, "etcdserver: role name already exists")
+	ErrGRPCRoleNotFound         = status.Error(codes.FailedPrecondition, "etcdserver: role name not found")
+	ErrGRPCRoleEmpty            = status.Error(codes.InvalidArgument, "etcdserver: role name is empty")
+	ErrGRPCAuthFailed           = status.Error(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
+	ErrGRPCPermissionNotGiven   = status.Error(codes.InvalidArgument, "etcdserver: permission not given")
+	ErrGRPCPermissionDenied     = status.Error(codes.PermissionDenied, "etcdserver: permission denied")
+	ErrGRPCRoleNotGranted       = status.Error(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
+	ErrGRPCPermissionNotGranted = status.Error(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
+	ErrGRPCAuthNotEnabled       = status.Error(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
+	ErrGRPCInvalidAuthToken     = status.Error(codes.Unauthenticated, "etcdserver: invalid auth token")
+	ErrGRPCInvalidAuthMgmt      = status.Error(codes.InvalidArgument, "etcdserver: invalid auth management")
+	ErrGRPCAuthOldRevision      = status.Error(codes.InvalidArgument, "etcdserver: revision of auth store is old")

-	ErrGRPCNoLeader                   = status.New(codes.Unavailable, "etcdserver: no leader").Err()
-	ErrGRPCNotLeader                  = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
-	ErrGRPCLeaderChanged              = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
-	ErrGRPCNotCapable                 = status.New(codes.Unavailable, "etcdserver: not capable").Err()
-	ErrGRPCStopped                    = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
-	ErrGRPCTimeout                    = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
-	ErrGRPCTimeoutDueToLeaderFail     = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
-	ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
-	ErrGRPCTimeoutWaitAppliedIndex    = status.New(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long").Err()
-	ErrGRPCUnhealthy                  = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
-	ErrGRPCCorrupt                    = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
-	ErrGPRCNotSupportedForLearner     = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err()
-	ErrGRPCBadLeaderTransferee        = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err()
+	ErrGRPCNoLeader                   = status.Error(codes.Unavailable, "etcdserver: no leader")
+	ErrGRPCNotLeader                  = status.Error(codes.FailedPrecondition, "etcdserver: not leader")
+	ErrGRPCLeaderChanged              = status.Error(codes.Unavailable, "etcdserver: leader changed")
+	ErrGRPCNotCapable                 = status.Error(codes.FailedPrecondition, "etcdserver: not capable")
+	ErrGRPCStopped                    = status.Error(codes.Unavailable, "etcdserver: server stopped")
+	ErrGRPCTimeout                    = status.Error(codes.Unavailable, "etcdserver: request timed out")
+	ErrGRPCTimeoutDueToLeaderFail     = status.Error(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure")
+	ErrGRPCTimeoutDueToConnectionLost = status.Error(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost")
+	ErrGRPCTimeoutWaitAppliedIndex    = status.Error(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long")
+	ErrGRPCUnhealthy                  = status.Error(codes.Unavailable, "etcdserver: unhealthy cluster")
+	ErrGRPCCorrupt                    = status.Error(codes.DataLoss, "etcdserver: corrupt cluster")
+	ErrGRPCNotSupportedForLearner     = status.Error(codes.FailedPrecondition, "etcdserver: rpc not supported for learner")
+	ErrGRPCBadLeaderTransferee        = status.Error(codes.FailedPrecondition, "etcdserver: bad leader transferee")

-	ErrGRPCClusterVersionUnavailable     = status.New(codes.Unavailable, "etcdserver: cluster version not found during downgrade").Err()
-	ErrGRPCWrongDowngradeVersionFormat   = status.New(codes.InvalidArgument, "etcdserver: wrong downgrade target version format").Err()
-	ErrGRPCInvalidDowngradeTargetVersion = status.New(codes.InvalidArgument, "etcdserver: invalid downgrade target version").Err()
-	ErrGRPCDowngradeInProcess            = status.New(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress").Err()
-	ErrGRPCNoInflightDowngrade           = status.New(codes.FailedPrecondition, "etcdserver: no inflight downgrade job").Err()
+	ErrGRPCWrongDowngradeVersionFormat   = status.Error(codes.InvalidArgument, "etcdserver: wrong downgrade target version format")
+	ErrGRPCInvalidDowngradeTargetVersion = status.Error(codes.InvalidArgument, "etcdserver: invalid downgrade target version")
+	ErrGRPCClusterVersionUnavailable     = status.Error(codes.FailedPrecondition, "etcdserver: cluster version not found during downgrade")
+	ErrGRPCDowngradeInProcess            = status.Error(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress")
+	ErrGRPCNoInflightDowngrade           = status.Error(codes.FailedPrecondition, "etcdserver: no inflight downgrade job")

-	ErrGRPCCanceled         = status.New(codes.Canceled, "etcdserver: request canceled").Err()
-	ErrGRPCDeadlineExceeded = status.New(codes.DeadlineExceeded, "etcdserver: context deadline exceeded").Err()
+	ErrGRPCCanceled         = status.Error(codes.Canceled, "etcdserver: request canceled")
+	ErrGRPCDeadlineExceeded = status.Error(codes.DeadlineExceeded, "etcdserver: context deadline exceeded")

 	errStringToError = map[string]error{
 		ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
@@ -97,11 +103,12 @@ var (
 		ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
 		ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,

-		ErrorDesc(ErrGRPCTooManyOps):   ErrGRPCTooManyOps,
-		ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
-		ErrorDesc(ErrGRPCCompacted):    ErrGRPCCompacted,
-		ErrorDesc(ErrGRPCFutureRev):    ErrGRPCFutureRev,
-		ErrorDesc(ErrGRPCNoSpace):      ErrGRPCNoSpace,
+		ErrorDesc(ErrGRPCTooManyOps):        ErrGRPCTooManyOps,
+		ErrorDesc(ErrGRPCDuplicateKey):      ErrGRPCDuplicateKey,
+		ErrorDesc(ErrGRPCInvalidSortOption): ErrGRPCInvalidSortOption,
+		ErrorDesc(ErrGRPCCompacted):         ErrGRPCCompacted,
+		ErrorDesc(ErrGRPCFutureRev):         ErrGRPCFutureRev,
+		ErrorDesc(ErrGRPCNoSpace):           ErrGRPCNoSpace,

 		ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
 		ErrorDesc(ErrGRPCLeaseExist):    ErrGRPCLeaseExist,
@@ -115,7 +122,7 @@ var (
 		ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner,
 		ErrorDesc(ErrGRPCLearnerNotReady):  ErrGRPCLearnerNotReady,
 		ErrorDesc(ErrGRPCTooManyLearners):  ErrGRPCTooManyLearners,
-		ErrorDesc(ErrGRPCClusterIdMismatch): ErrGRPCClusterIdMismatch,
+		ErrorDesc(ErrGRPCClusterIDMismatch): ErrGRPCClusterIDMismatch,

 		ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
 		ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
@@ -147,7 +154,7 @@ var (
 		ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
 		ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
 		ErrorDesc(ErrGRPCCorrupt):   ErrGRPCCorrupt,
-		ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner,
+		ErrorDesc(ErrGRPCNotSupportedForLearner): ErrGRPCNotSupportedForLearner,
 		ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee,

 		ErrorDesc(ErrGRPCClusterVersionUnavailable): ErrGRPCClusterVersionUnavailable,
@@ -160,15 +167,16 @@ var (

 	// client-side error
var ( - ErrEmptyKey = Error(ErrGRPCEmptyKey) - ErrKeyNotFound = Error(ErrGRPCKeyNotFound) - ErrValueProvided = Error(ErrGRPCValueProvided) - ErrLeaseProvided = Error(ErrGRPCLeaseProvided) - ErrTooManyOps = Error(ErrGRPCTooManyOps) - ErrDuplicateKey = Error(ErrGRPCDuplicateKey) - ErrCompacted = Error(ErrGRPCCompacted) - ErrFutureRev = Error(ErrGRPCFutureRev) - ErrNoSpace = Error(ErrGRPCNoSpace) + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrKeyNotFound = Error(ErrGRPCKeyNotFound) + ErrValueProvided = Error(ErrGRPCValueProvided) + ErrLeaseProvided = Error(ErrGRPCLeaseProvided) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrInvalidSortOption = Error(ErrGRPCInvalidSortOption) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = Error(ErrGRPCNoSpace) ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) ErrLeaseExist = Error(ErrGRPCLeaseExist) @@ -202,7 +210,11 @@ var ( ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision) ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) - ErrClusterIdMismatch = Error(ErrGRPCClusterIdMismatch) + ErrClusterIDMismatch = Error(ErrGRPCClusterIDMismatch) + //revive:disable:var-naming + // Deprecated: Please use ErrGRPCClusterIDMismatch. + ErrClusterIdMismatch = ErrClusterIDMismatch + //revive:enable:var-naming ErrNoLeader = Error(ErrGRPCNoLeader) ErrNotLeader = Error(ErrGRPCNotLeader) diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go index 8f8ac60ff2241..e5770afb2e89c 100644 --- a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go +++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go @@ -18,3 +18,6 @@ var ( TokenFieldNameGRPC = "token" TokenFieldNameSwagger = "authorization" ) + +// TokenFieldNameGRPCKey is used as a key of context to store token. +type TokenFieldNameGRPCKey struct{} diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index 36719c1920846..ef3c09e7c115d 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,13 +26,29 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.5.16" + Version = "3.6.0-alpha.0" APIVersion = "unknown" // Git SHA Value will be set during build GitSHA = "Not provided (use ./build instead of go build)" ) +// Get all constant versions defined in a centralized place. +var ( + V3_0 = semver.Version{Major: 3, Minor: 0} + V3_1 = semver.Version{Major: 3, Minor: 1} + V3_2 = semver.Version{Major: 3, Minor: 2} + V3_3 = semver.Version{Major: 3, Minor: 3} + V3_4 = semver.Version{Major: 3, Minor: 4} + V3_5 = semver.Version{Major: 3, Minor: 5} + V3_6 = semver.Version{Major: 3, Minor: 6} + V3_7 = semver.Version{Major: 3, Minor: 7} + V4_0 = semver.Version{Major: 4, Minor: 0} + + // AllVersions keeps all the versions in ascending order. 
+ AllVersions = []semver.Version{V3_0, V3_1, V3_2, V3_3, V3_4, V3_5, V3_6, V3_7, V4_0} +) + func init() { ver, err := semver.NewVersion(Version) if err == nil { @@ -43,6 +59,7 @@ func init() { type Versions struct { Server string `json:"etcdserver"` Cluster string `json:"etcdcluster"` + Storage string `json:"storage"` // TODO: raft state machine version } @@ -54,3 +71,15 @@ func Cluster(v string) string { } return fmt.Sprintf("%s.%s", vs[0], vs[1]) } + +func Compare(ver1, ver2 semver.Version) int { + return ver1.Compare(ver2) +} + +func LessThan(ver1, ver2 semver.Version) bool { + return ver1.LessThan(ver2) +} + +func Equal(ver1, ver2 semver.Version) bool { + return ver1.Equal(ver2) +} diff --git a/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go new file mode 100644 index 0000000000000..71c74eb719297 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go @@ -0,0 +1,91 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: version.proto + +package versionpb + +import ( + fmt "fmt" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +var E_EtcdVersionMsg = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*string)(nil), + Field: 50000, + Name: "versionpb.etcd_version_msg", + Tag: "bytes,50000,opt,name=etcd_version_msg", + Filename: "version.proto", +} + +var E_EtcdVersionField = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 50001, + Name: "versionpb.etcd_version_field", + Tag: "bytes,50001,opt,name=etcd_version_field", + Filename: "version.proto", +} + +var E_EtcdVersionEnum = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 50002, + Name: "versionpb.etcd_version_enum", + Tag: "bytes,50002,opt,name=etcd_version_enum", + Filename: "version.proto", +} + +var E_EtcdVersionEnumValue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 50003, + Name: "versionpb.etcd_version_enum_value", + Tag: "bytes,50003,opt,name=etcd_version_enum_value", + Filename: "version.proto", +} + +func init() { + proto.RegisterExtension(E_EtcdVersionMsg) + proto.RegisterExtension(E_EtcdVersionField) + proto.RegisterExtension(E_EtcdVersionEnum) + proto.RegisterExtension(E_EtcdVersionEnumValue) +} + +func init() { proto.RegisterFile("version.proto", fileDescriptor_7d2c07d79758f814) } + +var fileDescriptor_7d2c07d79758f814 = []byte{ + // 284 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xd1, 0xc1, 0x4a, 0xc3, 0x30, + 0x18, 0x07, 0x70, 0x83, 0x20, 0x2c, 0xa0, 0xce, 0x30, 0x50, 0x86, 0xd6, 0x7a, 0xf3, 0x94, 0x80, + 0xbb, 0xed, 0x28, 0xe8, 0xad, 0x2a, 0x1e, 0x76, 0x10, 0xa4, 0xb4, 0x6b, 0x16, 0x02, 0x6d, 0xbf, + 0xd0, 0xb4, 0x7d, 0x04, 0xd9, 0x23, 0xf8, 0x48, 0x1e, 
0xa7, 0xbe, 0x80, 0xd4, 0x17, 0x91, 0xa4, + 0xa9, 0xac, 0xd6, 0x53, 0xfb, 0x7d, 0xdf, 0xff, 0xff, 0xeb, 0xa1, 0x78, 0xbf, 0xe6, 0x85, 0x96, + 0x90, 0x53, 0x55, 0x40, 0x09, 0x64, 0xe4, 0x46, 0x15, 0x4f, 0x27, 0x02, 0x04, 0xd8, 0x2d, 0x33, + 0x6f, 0x6d, 0x60, 0xea, 0x0b, 0x00, 0x91, 0x72, 0x66, 0xa7, 0xb8, 0x5a, 0xb1, 0x84, 0xeb, 0x65, + 0x21, 0x55, 0x09, 0x45, 0x9b, 0x98, 0xdf, 0xe1, 0x31, 0x2f, 0x97, 0x49, 0xe8, 0xa4, 0x30, 0xd3, + 0x82, 0x9c, 0xd3, 0xb6, 0x46, 0xbb, 0x1a, 0x0d, 0xb8, 0xd6, 0x91, 0xe0, 0xf7, 0xaa, 0x94, 0x90, + 0xeb, 0x93, 0xcd, 0xcb, 0xae, 0x8f, 0x2e, 0x47, 0x8f, 0x07, 0xa6, 0xba, 0x68, 0x9b, 0x81, 0x16, + 0x6b, 0x84, 0xe6, 0x0f, 0x98, 0xf4, 0xbc, 0x95, 0xe4, 0x69, 0x42, 0xce, 0x06, 0xe2, 0xad, 0xd9, + 0x77, 0xde, 0xbb, 0xf3, 0xc6, 0x5b, 0x9e, 0x0d, 0x18, 0x31, 0xc0, 0x47, 0x3d, 0x91, 0xe7, 0x55, + 0x46, 0x4e, 0x07, 0xe0, 0x4d, 0x5e, 0x65, 0x9d, 0xf7, 0xe1, 0xbc, 0xc3, 0x2d, 0xcf, 0xdc, 0x0d, + 0xf7, 0x8c, 0x8f, 0x07, 0x5c, 0x58, 0x47, 0x69, 0xc5, 0xc9, 0xc5, 0xbf, 0xe8, 0xc2, 0xdc, 0x3a, + 0xf9, 0xd3, 0xc9, 0x93, 0x3f, 0xb2, 0x0d, 0xad, 0x11, 0xba, 0xbe, 0x7a, 0x6b, 0x3c, 0xb4, 0x69, + 0x3c, 0xf4, 0xd5, 0x78, 0xe8, 0xf5, 0xdb, 0xdb, 0x79, 0xf2, 0x05, 0x50, 0x93, 0xa6, 0x12, 0x98, + 0x79, 0xb2, 0x48, 0x49, 0x56, 0xcf, 0xd8, 0xef, 0xbf, 0x8b, 0xf7, 0xec, 0xf7, 0x66, 0x3f, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xe8, 0x02, 0x15, 0xc0, 0xde, 0x01, 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto new file mode 100644 index 0000000000000..c81b2f58a44d4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package versionpb; + +import "gogoproto/gogo.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "go.etcd.io/etcd/api/v3/versionpb"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// Indicates etcd version that introduced the message, used to determine minimal etcd version required to interpret wal that includes this message. +extend google.protobuf.MessageOptions { + optional string etcd_version_msg = 50000; +} + +// Indicates etcd version that introduced the field, used to determine minimal etcd version required to interpret wal that sets this field. +extend google.protobuf.FieldOptions { + optional string etcd_version_field = 50001; +} + +// Indicates etcd version that introduced the enum, used to determine minimal etcd version required to interpret wal that uses this enum. +extend google.protobuf.EnumOptions { + optional string etcd_version_enum = 50002; +} + +// Indicates etcd version that introduced the enum value, used to determine minimal etcd version required to interpret wal that sets this enum value. +extend google.protobuf.EnumValueOptions { + optional string etcd_version_enum_value = 50003; +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go index ca82f765c990a..42221f4b90a74 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows -// +build !windows package fileutil @@ -21,7 +20,7 @@ import "os" const ( // PrivateDirMode grants owner to make/remove files inside the directory. - PrivateDirMode = 0700 + PrivateDirMode = 0o700 ) // OpenDir opens a directory for syncing. 
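
Note on the rpctypes hunks at the top of this section: the rewrite is almost entirely mechanical. In grpc-go, status.Error(c, msg) is implemented as status.New(c, msg).Err(), so swapping one form for the other produces the same error values; only the renamed identifier (ErrGPRCNotSupportedForLearner -> ErrGRPCNotSupportedForLearner) and the changed codes (e.g. ErrGRPCNotCapable moving from Unavailable to FailedPrecondition) alter behavior. A minimal standalone sketch of the equivalence, using only the public grpc-go status API:

	package main

	import (
		"fmt"

		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	func main() {
		// status.Error(c, msg) is shorthand for status.New(c, msg).Err().
		a := status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
		b := status.Error(codes.ResourceExhausted, "etcdserver: too many requests")

		fmt.Println(status.Code(a) == status.Code(b)) // true: same gRPC code
		fmt.Println(a.Error() == b.Error())           // true: same message
	}
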
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go index 849c63c8769c9..fd3415d5944fd 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build windows -// +build windows package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go index d31ece3e24ea8..b68c50415eee1 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go @@ -17,16 +17,18 @@ package fileutil import ( "fmt" "io" - "io/ioutil" + "io/fs" "os" "path/filepath" "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/verify" ) const ( // PrivateFileMode grants owner to read/write a file. - PrivateFileMode = 0600 + PrivateFileMode = 0o600 ) // IsDirWriteable checks if dir is writable by writing and removing a file @@ -36,7 +38,7 @@ func IsDirWriteable(dir string) error { if err != nil { return err } - if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil { + if err := os.WriteFile(f, []byte(""), PrivateFileMode); err != nil { return err } return os.Remove(f) @@ -45,8 +47,9 @@ func IsDirWriteable(dir string) error { // TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory // does not exists. TouchDirAll also ensures the given directory is writable. func TouchDirAll(lg *zap.Logger, dir string) error { + verify.Assert(lg != nil, "nil log isn't allowed") // If path is already a directory, MkdirAll does nothing and returns nil, so, - // first check if dir exist with an expected permission mode. + // first check if dir exists with an expected permission mode. 
if Exist(dir) { err := CheckDirPermission(dir, PrivateDirMode) if err != nil { @@ -122,7 +125,7 @@ func CheckDirPermission(dir string, perm os.FileMode) error { if !Exist(dir) { return fmt.Errorf("directory %q empty, cannot check permission", dir) } - //check the existing permission on the directory + // check the existing permission on the directory dirInfo, err := os.Stat(dir) if err != nil { return err @@ -166,3 +169,16 @@ func RemoveMatchFile(lg *zap.Logger, dir string, matchFunc func(fileName string) } return nil } + +// ListFiles lists files if matchFunc is true on an existing dir +// Returns error if the dir does not exist +func ListFiles(dir string, matchFunc func(fileName string) bool) ([]string, error) { + var files []string + err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error { + if matchFunc(path) { + files = append(files, path) + } + return nil + }) + return files, err +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go index 338627f43c88d..dd2fa545d227c 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go @@ -19,8 +19,6 @@ import ( "os" ) -var ( - ErrLocked = errors.New("fileutil: file already locked") -) +var ErrLocked = errors.New("fileutil: file already locked") type LockedFile struct{ *os.File } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go index dcdf226cdbfde..178c987a4a396 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go @@ -13,11 +13,11 @@ // limitations under the License. //go:build !windows && !plan9 && !solaris -// +build !windows,!plan9,!solaris package fileutil import ( + "errors" "os" "syscall" ) @@ -29,7 +29,7 @@ func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, err } if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { f.Close() - if err == syscall.EWOULDBLOCK { + if errors.Is(err, syscall.EWOULDBLOCK) { err = ErrLocked } return nil, err diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go index d8952cc481b05..609ac397849b9 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go @@ -13,11 +13,11 @@ // limitations under the License. 
//go:build linux -// +build linux package fileutil import ( + "errors" "fmt" "io" "os" @@ -59,13 +59,13 @@ func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := os.OpenFile(path, flag, perm) if err != nil { - return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%v)", path, err) + return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%w)", path, err) } flock := wrlck if err = syscall.FcntlFlock(f.Fd(), unix.F_OFD_SETLK, &flock); err != nil { f.Close() - if err == syscall.EWOULDBLOCK { + if errors.Is(err, syscall.EWOULDBLOCK) { err = ErrLocked } return nil, err @@ -80,7 +80,7 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := os.OpenFile(path, flag, perm) if err != nil { - return nil, fmt.Errorf("ofdLockFile failed to open %q (%v)", path, err) + return nil, fmt.Errorf("ofdLockFile failed to open %q (%w)", path, err) } flock := wrlck diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go index 683cc1db9c417..2e892fecc6546 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build solaris -// +build solaris package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go index d89027e1fad60..05db53674105a 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows && !plan9 && !solaris && !linux -// +build !windows,!plan9,!solaris,!linux package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go index 5cbf2bc3d5e86..51010bdf81ce7 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go @@ -13,7 +13,6 @@ // limitations under the License. 
//go:build windows -// +build windows package fileutil @@ -22,31 +21,18 @@ import ( "fmt" "os" "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file") + "golang.org/x/sys/windows" ) -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - LOCKFILE_EXCLUSIVE_LOCK = 2 - LOCKFILE_FAIL_IMMEDIATELY = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) +var errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file") func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := open(path, flag, perm) if err != nil { return nil, err } - if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { + if err := lockFile(windows.Handle(f.Fd()), windows.LOCKFILE_FAIL_IMMEDIATELY); err != nil { f.Close() return nil, err } @@ -58,7 +44,7 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { if err != nil { return nil, err } - if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { + if err := lockFile(windows.Handle(f.Fd()), 0); err != nil { f.Close() return nil, err } @@ -67,7 +53,7 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { func open(path string, flag int, perm os.FileMode) (*os.File, error) { if path == "" { - return nil, fmt.Errorf("cannot open empty filename") + return nil, errors.New("cannot open empty filename") } var access uint32 switch flag { @@ -95,32 +81,17 @@ func open(path string, flag int, perm os.FileMode) (*os.File, error) { return os.NewFile(uintptr(fd), path), nil } -func lockFile(fd syscall.Handle, flags uint32) error { - var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK - flag |= flags - if fd == syscall.InvalidHandle { +func lockFile(fd windows.Handle, flags uint32) error { + if fd == windows.InvalidHandle { return nil } - err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + err := windows.LockFileEx(fd, flags|windows.LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &windows.Overlapped{}) if err == nil { return nil } else if err.Error() == errLocked.Error() { return ErrLocked - } else if err != errLockViolation { + } else if err != windows.ERROR_LOCK_VIOLATION { return err } return nil } - -func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - var reserved uint32 = 0 - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return err -} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go index c747b7cf81f93..aadbff7131d7f 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go @@ -19,9 +19,9 @@ import ( "os" ) -// Preallocate tries to allocate the space for given -// file. This operation is only supported on linux by a -// few filesystems (btrfs, ext4, etc.). +// Preallocate tries to allocate the space for given file. 
This +// operation is only supported on darwin and linux by a few +// filesystems (APFS, btrfs, ext4, etc.). // If the operation is unsupported, no error will be returned. // Otherwise, the error encountered will be returned. func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go index caab143dd301e..72430ec273b8f 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go @@ -13,11 +13,11 @@ // limitations under the License. //go:build darwin -// +build darwin package fileutil import ( + "errors" "os" "syscall" @@ -40,7 +40,7 @@ func preallocFixed(f *os.File, sizeInBytes int64) error { Length: sizeInBytes, } err := unix.FcntlFstore(f.Fd(), unix.F_PREALLOCATE, fstore) - if err == nil || err == unix.ENOTSUP { + if err == nil || errors.Is(err, unix.ENOTSUP) { return nil } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go index ebb8207c3408d..b0a8166ae144f 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go @@ -13,11 +13,11 @@ // limitations under the License. //go:build linux -// +build linux package fileutil import ( + "errors" "os" "syscall" ) @@ -26,10 +26,10 @@ func preallocExtend(f *os.File, sizeInBytes int64) error { // use mode = 0 to change size err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) if err != nil { - errno, ok := err.(syscall.Errno) + var errno syscall.Errno // not supported; fallback // fallocate EINTRs frequently in some environments; fallback - if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + if errors.As(err, &errno) && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { return preallocExtendTrunc(f, sizeInBytes) } } @@ -40,9 +40,9 @@ func preallocFixed(f *os.File, sizeInBytes int64) error { // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) if err != nil { - errno, ok := err.(syscall.Errno) + var errno syscall.Errno // treat not supported as nil error - if ok && errno == syscall.ENOTSUP { + if errors.As(err, &errno) && errno == syscall.ENOTSUP { return nil } } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go index 2c46dd4907553..e7fd937a43696 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go @@ -13,7 +13,6 @@ // limitations under the License. 
//go:build !linux && !darwin -// +build !linux,!darwin package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go index b314e068fea1a..026ea03230fa3 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go @@ -17,7 +17,6 @@ package fileutil import ( "os" "path/filepath" - "sort" "strings" "time" @@ -58,21 +57,14 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval defer close(donec) } for { - fnames, err := ReadDir(dirname) + fnamesWithSuffix, err := readDirWithSuffix(dirname, suffix) if err != nil { errC <- err return } - newfnames := make([]string, 0) - for _, fname := range fnames { - if strings.HasSuffix(fname, suffix) { - newfnames = append(newfnames, fname) - } - } - sort.Strings(newfnames) - fnames = newfnames - for len(newfnames) > int(max) { - f := filepath.Join(dirname, newfnames[0]) + nPurged := 0 + for nPurged < len(fnamesWithSuffix)-int(max) { + f := filepath.Join(dirname, fnamesWithSuffix[nPurged]) var l *LockedFile if flock { l, err = TryLockFile(f, os.O_WRONLY, PrivateFileMode) @@ -94,11 +86,12 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval } } lg.Info("purged", zap.String("path", f)) - newfnames = newfnames[1:] + nPurged++ } + if purgec != nil { - for i := 0; i < len(fnames)-len(newfnames); i++ { - purgec <- fnames[i] + for i := 0; i < nPurged; i++ { + purgec <- fnamesWithSuffix[i] } } select { @@ -110,3 +103,18 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval }() return errC } + +func readDirWithSuffix(dirname string, suffix string) ([]string, error) { + fnames, err := ReadDir(dirname) + if err != nil { + return nil, err + } + // filter in place (ref. https://go.dev/wiki/SliceTricks#filtering-without-allocating) + fnamesWithSuffix := fnames[:0] + for _, fname := range fnames { + if strings.HasSuffix(fname, suffix) { + fnamesWithSuffix = append(fnamesWithSuffix, fname) + } + } + return fnamesWithSuffix, nil +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go index 0a0855309e9ef..670d01fadcc27 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !linux && !darwin -// +build !linux,!darwin package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go index 1923b276ea072..7affa78ea6404 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build darwin -// +build darwin package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go index b9398c23f9476..a3172382e5ae9 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go @@ -13,7 +13,6 @@ // limitations under the License. 
//go:build linux -// +build linux package fileutil diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go new file mode 100644 index 0000000000000..286d385ba3990 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go @@ -0,0 +1,42 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import "fmt" + +const ( + JSONLogFormat = "json" + ConsoleLogFormat = "console" + //revive:disable:var-naming + // Deprecated: Please use JSONLogFormat. + JsonLogFormat = JSONLogFormat + //revive:enable:var-naming +) + +var DefaultLogFormat = JSONLogFormat + +// ConvertToZapFormat converts and validated log format string. +func ConvertToZapFormat(format string) (string, error) { + switch format { + case ConsoleLogFormat: + return ConsoleLogFormat, nil + case JSONLogFormat: + return JSONLogFormat, nil + case "": + return DefaultLogFormat, nil + default: + return "", fmt.Errorf("unknown log format: %s, supported values json, console", format) + } +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go index f0f3739aad81f..1a502d6d169d6 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go @@ -43,7 +43,7 @@ var DefaultZapLoggerConfig = zap.Config{ Thereafter: 100, }, - Encoding: "json", + Encoding: DefaultLogFormat, // copied from "zap.NewProductionEncoderConfig" with some updates EncoderConfig: zapcore.EncoderConfig{ diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go index 9daa3e0aab1d5..06dc40dacd985 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows -// +build !windows package logutil @@ -25,10 +24,10 @@ import ( "os" "path/filepath" - "go.etcd.io/etcd/client/pkg/v3/systemd" - "github.com/coreos/go-systemd/v22/journal" "go.uber.org/zap/zapcore" + + "go.etcd.io/etcd/client/pkg/v3/systemd" ) // NewJournalWriter wraps "io.Writer" to redirect log output diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go b/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go index 948c683490911..e0a1ccef35cce 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go @@ -33,7 +33,6 @@ var ( // GetCluster gets the cluster information via DNS discovery. // Also sees each entry as a separate instance. 
func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]string, error) { - tempName := int(0) tcp2ap := make(map[string]url.URL) // First, resolve the apurls @@ -45,7 +44,10 @@ func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([] tcp2ap[tcpAddr.String()] = url } - stringParts := []string{} + var ( + tempName int + stringParts []string + ) updateNodeMap := func(service, scheme string) error { _, addrs, err := lookupSRV(service, "tcp", dns) if err != nil { @@ -85,7 +87,7 @@ func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([] err := updateNodeMap(service, serviceScheme) if err != nil { - return nil, fmt.Errorf("error querying DNS SRV records for _%s %s", service, err) + return nil, fmt.Errorf("error querying DNS SRV records for _%s %w", service, err) } return stringParts, nil } @@ -97,8 +99,10 @@ type SRVClients struct { // GetClient looks up the client endpoints for a service and domain. func GetClient(service, domain string, serviceName string) (*SRVClients, error) { - var urls []*url.URL - var srvs []*net.SRV + var ( + urls []*url.URL + srvs []*net.SRV + ) updateURLs := func(service, scheme string) error { _, addrs, err := lookupSRV(service, "tcp", domain) @@ -119,7 +123,7 @@ func GetClient(service, domain string, serviceName string) (*SRVClients, error) errHTTP := updateURLs(GetSRVService(service, serviceName, "http"), "http") if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + return nil, fmt.Errorf("dns lookup errors: %w and %w", errHTTPS, errHTTP) } endpoints := make([]string, len(urls)) diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go index 3a5aef089a784..0f79865e805ed 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go @@ -18,7 +18,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" - "io/ioutil" + "os" ) // NewCertPool creates x509 certPool with provided CA files. @@ -26,7 +26,7 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) { certPool := x509.NewCertPool() for _, CAFile := range CAFiles { - pemByte, err := ioutil.ReadFile(CAFile) + pemByte, err := os.ReadFile(CAFile) if err != nil { return nil, err } @@ -51,12 +51,12 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) { // NewCert generates TLS cert by using the given cert,key and parse function. 
func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { - cert, err := ioutil.ReadFile(certfile) + cert, err := os.ReadFile(certfile) if err != nil { return nil, err } - key, err := ioutil.ReadFile(keyfile) + key, err := os.ReadFile(keyfile) if err != nil { return nil, err } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go index d00a93dc19ef1..d43ac4f078aba 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go @@ -16,6 +16,7 @@ package transport import ( "crypto/tls" + "errors" "fmt" "net" "time" @@ -38,7 +39,7 @@ func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (ne if scheme == "https" { if tlscfg == nil { - return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") + return nil, errors.New("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") } return newTLSKeepaliveListener(kal, tlscfg), nil } @@ -95,16 +96,17 @@ type tlsKeepaliveListener struct { // Accept waits for and returns the next incoming TLS connection. // The returned connection c is a *tls.Conn. -func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { - c, err = l.Listener.Accept() +func (l *tlsKeepaliveListener) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() if err != nil { - return + return nil, err } + c = tls.Server(c, l.config) return c, nil } -// NewListener creates a Listener which accepts connections from an inner +// newTLSKeepaliveListener creates a Listener which accepts connections from an inner // Listener and wraps each connection with Server. // The configuration config must be non-nil and must have // at least one certificate. diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go index 404722ba76edf..bf4c4e104a277 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go @@ -23,9 +23,7 @@ import ( "time" ) -var ( - ErrNotTCP = errors.New("only tcp connections have keepalive") -) +var ErrNotTCP = errors.New("only tcp connections have keepalive") // LimitListener returns a Listener that accepts at most n simultaneous // connections from the provided Listener. diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go index a7d37688d92fa..9c2d29ba998ec 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go @@ -32,10 +32,11 @@ import ( "strings" "time" + "go.uber.org/zap" + "go.etcd.io/etcd/client/pkg/v3/fileutil" "go.etcd.io/etcd/client/pkg/v3/tlsutil" - - "go.uber.org/zap" + "go.etcd.io/etcd/client/pkg/v3/verify" ) // NewListener creates a new listner. @@ -43,7 +44,7 @@ func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err err return newListener(addr, scheme, WithTLSInfo(tlsinfo)) } -// NewListenerWithOpts creates a new listener which accpets listener options. +// NewListenerWithOpts creates a new listener which accepts listener options. func NewListenerWithOpts(addr, scheme string, opts ...ListenerOption) (net.Listener, error) { return newListener(addr, scheme, opts...) 
} @@ -59,11 +60,7 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err switch { case lnOpts.IsSocketOpts(): // new ListenConfig with socket options. - config, err := newListenConfig(lnOpts.socketOpts) - if err != nil { - return nil, err - } - lnOpts.ListenConfig = config + lnOpts.ListenConfig = newListenConfig(lnOpts.socketOpts) // check for timeout fallthrough case lnOpts.IsTimeout(), lnOpts.IsSocketOpts(): @@ -102,14 +99,17 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err return wrapTLS(scheme, lnOpts.tlsInfo, lnOpts.Listener) } -func newKeepAliveListener(cfg *net.ListenConfig, addr string) (ln net.Listener, err error) { +func newKeepAliveListener(cfg *net.ListenConfig, addr string) (net.Listener, error) { + var ln net.Listener + var err error + if cfg != nil { ln, err = cfg.Listen(context.TODO(), "tcp", addr) } else { ln, err = net.Listen("tcp", addr) } if err != nil { - return + return nil, err } return NewKeepAliveListener(ln, "tcp", nil) @@ -125,7 +125,7 @@ func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, err return newTLSListener(l, tlsinfo, checkSAN) } -func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) { +func newListenConfig(sopts *SocketOpts) net.ListenConfig { lc := net.ListenConfig{} if sopts != nil { ctls := getControls(sopts) @@ -133,7 +133,7 @@ func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) { lc.Control = ctls.Control } } - return lc, nil + return lc } type TLSInfo struct { @@ -204,6 +204,9 @@ type TLSInfo struct { // EmptyCN indicates that the cert must have empty CN. // If true, ClientConfig() will return an error for a cert with non empty CN. EmptyCN bool + + // LocalAddr is the local IP address to use when communicating with a peer. 
+ LocalAddr string } func (info TLSInfo) String() string { @@ -214,34 +217,35 @@ func (info TLSInfo) Empty() bool { return info.CertFile == "" && info.KeyFile == "" } -func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) { - info.Logger = lg +func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (TLSInfo, error) { + verify.Assert(lg != nil, "nil log isn't allowed") + + var err error + info := TLSInfo{Logger: lg} if selfSignedCertValidity == 0 { - err = fmt.Errorf("selfSignedCertValidity is invalid,it should be greater than 0") + err = errors.New("selfSignedCertValidity is invalid,it should be greater than 0") info.Logger.Warn( "cannot generate cert", zap.Error(err), ) - return + return info, err } err = fileutil.TouchDirAll(lg, dirpath) if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot create cert directory", - zap.Error(err), - ) - } - return + info.Logger.Warn( + "cannot create cert directory", + zap.Error(err), + ) + return info, err } certPath, err := filepath.Abs(filepath.Join(dirpath, "cert.pem")) if err != nil { - return + return info, err } keyPath, err := filepath.Abs(filepath.Join(dirpath, "key.pem")) if err != nil { - return + return info, err } _, errcert := os.Stat(certPath) _, errkey := os.Stat(keyPath) @@ -251,19 +255,17 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali info.ClientCertFile = certPath info.ClientKeyFile = keyPath info.selfCert = true - return + return info, err } serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot generate random number", - zap.Error(err), - ) - } - return + info.Logger.Warn( + "cannot generate random number", + zap.Error(err), + ) + return info, err } tmpl := x509.Certificate{ @@ -272,17 +274,16 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali NotBefore: time.Now(), NotAfter: time.Now().Add(time.Duration(selfSignedCertValidity) * 365 * (24 * time.Hour)), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCRLSign, ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...), BasicConstraintsValid: true, + IsCA: true, } - if info.Logger != nil { - info.Logger.Warn( - "automatically generate certificates", - zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter), - ) - } + info.Logger.Warn( + "automatically generate certificates", + zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter), + ) for _, host := range hosts { h, _, _ := net.SplitHostPort(host) @@ -295,24 +296,20 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot generate ECDSA key", - zap.Error(err), - ) - } - return + info.Logger.Warn( + "cannot generate ECDSA key", + zap.Error(err), + ) + return info, err } derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot generate x509 certificate", - zap.Error(err), - ) - } - return + info.Logger.Warn( + "cannot 
generate x509 certificate", + zap.Error(err), + ) + return info, err } certOut, err := os.Create(certPath) @@ -322,34 +319,29 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali zap.String("path", certPath), zap.Error(err), ) - return + return info, err } pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) certOut.Close() - if info.Logger != nil { - info.Logger.Info("created cert file", zap.String("path", certPath)) - } + + info.Logger.Info("created cert file", zap.String("path", certPath)) b, err := x509.MarshalECPrivateKey(priv) if err != nil { - return + return info, err } - keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot key file", - zap.String("path", keyPath), - zap.Error(err), - ) - } - return + info.Logger.Warn( + "cannot key file", + zap.String("path", keyPath), + zap.Error(err), + ) + return info, err } pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) keyOut.Close() - if info.Logger != nil { - info.Logger.Info("created key file", zap.String("path", keyPath)) - } + info.Logger.Info("created key file", zap.String("path", keyPath)) return SelfCert(lg, dirpath, hosts, selfSignedCertValidity) } @@ -482,23 +474,19 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) { cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) if os.IsNotExist(err) { - if info.Logger != nil { - info.Logger.Warn( - "failed to find peer cert files", - zap.String("cert-file", info.CertFile), - zap.String("key-file", info.KeyFile), - zap.Error(err), - ) - } + info.Logger.Warn( + "failed to find peer cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) } else if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "failed to create peer certificate", - zap.String("cert-file", info.CertFile), - zap.String("key-file", info.KeyFile), - zap.Error(err), - ) - } + info.Logger.Warn( + "failed to create peer certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) } return cert, err } @@ -509,23 +497,19 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { } cert, err = tlsutil.NewCert(certfile, keyfile, info.parseFunc) if os.IsNotExist(err) { - if info.Logger != nil { - info.Logger.Warn( - "failed to find client cert files", - zap.String("cert-file", certfile), - zap.String("key-file", keyfile), - zap.Error(err), - ) - } + info.Logger.Warn( + "failed to find client cert files", + zap.String("cert-file", certfile), + zap.String("key-file", keyfile), + zap.Error(err), + ) } else if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "failed to create client certificate", - zap.String("cert-file", certfile), - zap.String("key-file", keyfile), - zap.Error(err), - ) - } + info.Logger.Warn( + "failed to create client certificate", + zap.String("cert-file", certfile), + zap.String("key-file", keyfile), + zap.Error(err), + ) } return cert, err } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go index ad4f6904da901..7536f6aff4626 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go +++ 
b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go @@ -1,3 +1,17 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package transport import ( diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go index 6f1600945cc6f..2c94841625b0b 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go @@ -19,8 +19,8 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" + "os" "strings" "sync" ) @@ -168,16 +168,16 @@ func (l *tlsListener) acceptLoop() { func checkCRL(crlPath string, cert []*x509.Certificate) error { // TODO: cache - crlBytes, err := ioutil.ReadFile(crlPath) + crlBytes, err := os.ReadFile(crlPath) if err != nil { return err } - certList, err := x509.ParseCRL(crlBytes) + certList, err := x509.ParseRevocationList(crlBytes) if err != nil { return err } revokedSerials := make(map[string]struct{}) - for _, rc := range certList.TBSCertList.RevokedCertificates { + for _, rc := range certList.RevokedCertificateEntries { revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} } for _, c := range cert { @@ -222,7 +222,8 @@ func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { // reverse lookup - wildcards, names := []string{}, []string{} + var names []string + var wildcards []string for _, dns := range dnsNames { if strings.HasPrefix(dns, "*.") { wildcards = append(wildcards, dns[1:]) diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go index fe028c613232c..49b48dc876797 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go @@ -1,3 +1,17 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package transport import ( diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go index 495c736365e3d..149ad510240b3 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go @@ -13,19 +13,18 @@ // limitations under the License. 
//go:build solaris -// +build solaris package transport import ( - "fmt" + "errors" "syscall" "golang.org/x/sys/unix" ) func setReusePort(network, address string, c syscall.RawConn) error { - return fmt.Errorf("port reuse is not supported on Solaris") + return errors.New("port reuse is not supported on Solaris") } func setReuseAddress(network, address string, conn syscall.RawConn) error { diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go index e2cc6f482863f..385eadb007449 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go @@ -12,8 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows && !solaris -// +build !windows,!solaris +//go:build !windows && !solaris && !wasm && !js package transport diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go similarity index 54% rename from vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go rename to vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go index d10ad665333f4..c6590b1d46964 100644 --- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go @@ -1,10 +1,10 @@ -// Copyright 2020 Google LLC +// Copyright 2023 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,12 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package field_mask aliases all exported identifiers in -// package "google.golang.org/protobuf/types/known/fieldmaskpb". -package field_mask +//go:build wasm || js -import "google.golang.org/protobuf/types/known/fieldmaskpb" +package transport -type FieldMask = fieldmaskpb.FieldMask +import ( + "errors" + "syscall" +) -var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto +func setReusePort(network, address string, c syscall.RawConn) error { + return errors.New("port reuse is not supported on WASM") +} + +func setReuseAddress(network, addr string, conn syscall.RawConn) error { + return errors.New("address reuse is not supported on WASM") +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go index 4e5af70b11ebe..2670b4dc7b53e 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go @@ -1,19 +1,32 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + //go:build windows -// +build windows package transport import ( - "fmt" + "errors" "syscall" ) func setReusePort(network, address string, c syscall.RawConn) error { - return fmt.Errorf("port reuse is not supported on Windows") + return errors.New("port reuse is not supported on Windows") } // Windows supports SO_REUSEADDR, but it may cause undefined behavior, as // there is no protection against port hijacking. func setReuseAddress(network, addr string, conn syscall.RawConn) error { - return fmt.Errorf("address reuse is not supported on Windows") + return errors.New("address reuse is not supported on Windows") } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go index 8c3a35b140bbd..d5375863fd5d6 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go @@ -16,6 +16,7 @@ package transport import ( "context" + "errors" "fmt" "strings" "time" @@ -46,7 +47,7 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { endpoints = append(endpoints, ep) } if len(errs) != 0 { - err = fmt.Errorf("%s", strings.Join(errs, ",")) + err = errors.New(strings.Join(errs, ",")) } return endpoints, err } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go index 648512772d370..67170d7436d07 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go @@ -30,10 +30,19 @@ func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, er return nil, err } + var ipAddr net.Addr + if info.LocalAddr != "" { + ipAddr, err = net.ResolveTCPAddr("tcp", info.LocalAddr+":0") + if err != nil { + return nil, err + } + } + t := &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ - Timeout: dialtimeoutd, + Timeout: dialtimeoutd, + LocalAddr: ipAddr, // value taken from http.DefaultTransport KeepAlive: 30 * time.Second, }).DialContext, @@ -57,7 +66,7 @@ func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, er TLSClientConfig: cfg, // Cost of reopening connection on sockets is low, and they are mostly used in testing. // Long living unix-transport connections were leading to 'leak' test flakes. - // Alternativly the returned Transport (t) should override CloseIdleConnections to + // Alternatively the returned Transport (t) should override CloseIdleConnections to // forward it to 'tu' as well. 
IdleConnTimeout: time.Microsecond, } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go index ae00388dde06c..7a09647b5d52f 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go @@ -14,7 +14,10 @@ package types -import "strconv" +import ( + "strconv" + "strings" +) // ID represents a generic identifier which is canonically // stored as a uint64 but is typically represented as a @@ -37,3 +40,17 @@ type IDSlice []ID func (p IDSlice) Len() int { return len(p) } func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p IDSlice) String() string { + var b strings.Builder + if p.Len() > 0 { + b.WriteString(p[0].String()) + } + + for i := 1; i < p.Len(); i++ { + b.WriteString(",") + b.WriteString(p[i].String()) + } + + return b.String() +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go index e7a3cdc9ab6d6..3e69c8d8b9437 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go @@ -90,7 +90,7 @@ func (us *unsafeSet) Length() int { // Values returns the values of the Set in an unspecified order. func (us *unsafeSet) Values() (values []string) { - values = make([]string, 0) + values = make([]string, 0, len(us.d)) for val := range us.d { values = append(values, val) } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go b/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go new file mode 100644 index 0000000000000..a7b2097bed9bf --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go @@ -0,0 +1,80 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package verify + +import ( + "fmt" + "os" + "strings" +) + +const envVerify = "ETCD_VERIFY" + +type VerificationType string + +const ( + envVerifyValueAll VerificationType = "all" + envVerifyValueAssert VerificationType = "assert" +) + +func getEnvVerify() string { + return strings.ToLower(os.Getenv(envVerify)) +} + +func IsVerificationEnabled(verification VerificationType) bool { + env := getEnvVerify() + return env == string(envVerifyValueAll) || env == strings.ToLower(string(verification)) +} + +// EnableVerifications sets `envVerify` and returns a function that +// can be used to restore the original settings. +func EnableVerifications(verification VerificationType) func() { + previousEnv := getEnvVerify() + os.Setenv(envVerify, string(verification)) + return func() { + os.Setenv(envVerify, previousEnv) + } +} + +// EnableAllVerifications enables verification and returns a function +// that can be used to restore the original settings. +func EnableAllVerifications() func() { + return EnableVerifications(envVerifyValueAll) +} + +// DisableVerifications unsets `envVerify` and returns a function that +// can be used to restore the original settings.
+func DisableVerifications() func() { + previousEnv := getEnvVerify() + os.Unsetenv(envVerify) + return func() { + os.Setenv(envVerify, previousEnv) + } +} + +// Verify performs verification if the assertions are enabled. +// In the default setup it runs in tests and is skipped in production code. +func Verify(f func()) { + if IsVerificationEnabled(envVerifyValueAssert) { + f() + } +} + +// Assert will panic with a given formatted message if the given condition is false. +func Assert(condition bool, msg string, v ...any) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/vendor/go.etcd.io/etcd/client/v2/client.go b/vendor/go.etcd.io/etcd/client/v2/client.go index fda25988f6b84..c34bc9d7c849f 100644 --- a/vendor/go.etcd.io/etcd/client/v2/client.go +++ b/vendor/go.etcd.io/etcd/client/v2/client.go @@ -19,7 +19,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "math/rand" "net" "net/http" @@ -601,13 +601,15 @@ func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Respon var body []byte done := make(chan struct{}) go func() { - body, err = ioutil.ReadAll(resp.Body) + body, err = io.ReadAll(resp.Body) done <- struct{}{} }() select { case <-ctx.Done(): - resp.Body.Close() + if resp != nil { + resp.Body.Close() + } <-done return nil, nil, ctx.Err() case <-done: diff --git a/vendor/go.etcd.io/etcd/client/v2/curl.go b/vendor/go.etcd.io/etcd/client/v2/curl.go index c8bc9fba20ecc..8d12367541ef1 100644 --- a/vendor/go.etcd.io/etcd/client/v2/curl.go +++ b/vendor/go.etcd.io/etcd/client/v2/curl.go @@ -17,7 +17,7 @@ package client import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "os" ) @@ -53,18 +53,18 @@ func printcURL(req *http.Request) error { } if req.Body != nil { - b, err = ioutil.ReadAll(req.Body) + b, err = io.ReadAll(req.Body) if err != nil { return err } command += fmt.Sprintf(" -d %q", string(b)) } - fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) + fmt.Fprintf(os.Stderr, "cURL Command: %q\n", command) // reset body body := bytes.NewBuffer(b) - req.Body = ioutil.NopCloser(body) + req.Body = io.NopCloser(body) return nil } diff --git a/vendor/go.etcd.io/etcd/client/v2/doc.go b/vendor/go.etcd.io/etcd/client/v2/doc.go index 68284c20a89c2..5250758b017ca 100644 --- a/vendor/go.etcd.io/etcd/client/v2/doc.go +++ b/vendor/go.etcd.io/etcd/client/v2/doc.go @@ -68,5 +68,6 @@ Use a custom context to set timeouts on your operations: // handle error } } + */ package client diff --git a/vendor/go.etcd.io/etcd/client/v2/json.go b/vendor/go.etcd.io/etcd/client/v2/json.go deleted file mode 100644 index d5be690a1716e..0000000000000 --- a/vendor/go.etcd.io/etcd/client/v2/json.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2019 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
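A short usage sketch of the verify helpers introduced above; the `ETCD_VERIFY` gating keeps these closures out of production runs by default:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/verify"
)

func main() {
	// Turn assertion-level verification on for this process and restore
	// the previous ETCD_VERIFY value when done.
	restore := verify.EnableVerifications("assert")
	defer restore()

	// The closure only runs when ETCD_VERIFY is "all" or "assert".
	verify.Verify(func() {
		fmt.Println("running invariant check")
	})

	// Assert panics with the formatted message when the condition fails.
	verify.Assert(2+2 == 4, "math is broken: %d", 2+2)
}
```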
- -package client - -import ( - "strconv" - "unsafe" - - "github.com/json-iterator/go" - "github.com/modern-go/reflect2" -) - -type customNumberExtension struct { - jsoniter.DummyExtension -} - -func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder { - if typ.String() == "interface {}" { - return customNumberDecoder{} - } - return nil -} - -type customNumberDecoder struct { -} - -func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { - switch iter.WhatIsNext() { - case jsoniter.NumberValue: - var number jsoniter.Number - iter.ReadVal(&number) - i64, err := strconv.ParseInt(string(number), 10, 64) - if err == nil { - *(*interface{})(ptr) = i64 - return - } - f64, err := strconv.ParseFloat(string(number), 64) - if err == nil { - *(*interface{})(ptr) = f64 - return - } - iter.ReportError("DecodeNumber", err.Error()) - default: - *(*interface{})(ptr) = iter.Read() - } -} - -// caseSensitiveJsonIterator returns a jsoniterator API that's configured to be -// case-sensitive when unmarshalling, and otherwise compatible with -// the encoding/json standard library. -func caseSensitiveJsonIterator() jsoniter.API { - config := jsoniter.Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, - CaseSensitive: true, - }.Froze() - // Force jsoniter to decode number to interface{} via int64/float64, if possible. - config.RegisterExtension(&customNumberExtension{}) - return config -} diff --git a/vendor/go.etcd.io/etcd/client/v2/keys.go b/vendor/go.etcd.io/etcd/client/v2/keys.go index e8f166461747c..7eb927e562953 100644 --- a/vendor/go.etcd.io/etcd/client/v2/keys.go +++ b/vendor/go.etcd.io/etcd/client/v2/keys.go @@ -26,6 +26,7 @@ import ( "time" "go.etcd.io/etcd/client/pkg/v3/pathutil" + kjson "sigs.k8s.io/json" ) const ( @@ -653,11 +654,9 @@ func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Resp return res, err } -var jsonIterator = caseSensitiveJsonIterator() - func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { var res Response - err := jsonIterator.Unmarshal(body, &res) + err := kjson.UnmarshalCaseSensitivePreserveInts(body, &res) if err != nil { return nil, ErrInvalidJSON } diff --git a/vendor/go.etcd.io/etcd/client/v3/OWNERS b/vendor/go.etcd.io/etcd/client/v3/OWNERS new file mode 100644 index 0000000000000..2b7f28b793969 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/v3/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +labels: + - area/clientv3 diff --git a/vendor/go.etcd.io/etcd/client/v3/README.md b/vendor/go.etcd.io/etcd/client/v3/README.md index 1e037d7eb6b5b..af0087ebcc0f1 100644 --- a/vendor/go.etcd.io/etcd/client/v3/README.md +++ b/vendor/go.etcd.io/etcd/client/v3/README.md @@ -1,7 +1,7 @@ -# etcd/clientv3 +# etcd/client/v3 [![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) -[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/client/v3) `etcd/clientv3` is the official Go etcd client for v3. @@ -11,26 +11,23 @@ go get go.etcd.io/etcd/client/v3 ``` -Warning: As etcd 3.5.0 was not yet released, the command above does not work. 
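With json.go removed, keys.go (above) decodes responses through `sigs.k8s.io/json`, which keeps the old jsoniter semantics. A small illustration of what `UnmarshalCaseSensitivePreserveInts` guarantees:

```go
package main

import (
	"fmt"

	kjson "sigs.k8s.io/json"
)

func main() {
	var v map[string]any
	data := []byte(`{"modifiedIndex": 9007199254740993}`)

	// Unlike encoding/json, large whole numbers decode as int64 rather
	// than lossy float64, and field matching is case-sensitive.
	if err := kjson.UnmarshalCaseSensitivePreserveInts(data, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v["modifiedIndex"], v["modifiedIndex"]) // int64 9007199254740993
}
```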
-After first pre-release of 3.5.0 [#12498](https://github.com/etcd-io/etcd/issues/12498), -etcd can be referenced using: -``` -go get go.etcd.io/etcd/client/v3@v3.5.0-pre -``` - ## Get started Create client using `clientv3.New`: ```go -cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, - DialTimeout: 5 * time.Second, -}) -if err != nil { - // handle error! +import clientv3 "go.etcd.io/etcd/client/v3" + +func main() { + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, + DialTimeout: 5 * time.Second, + }) + if err != nil { + // handle error! + } + defer cli.Close() } -defer cli.Close() ``` etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls. And `clientv3` uses diff --git a/vendor/go.etcd.io/etcd/client/v3/auth.go b/vendor/go.etcd.io/etcd/client/v3/auth.go index 110918a4c7c41..382172b21bf59 100644 --- a/vendor/go.etcd.io/etcd/client/v3/auth.go +++ b/vendor/go.etcd.io/etcd/client/v3/auth.go @@ -19,9 +19,10 @@ import ( "fmt" "strings" + "google.golang.org/grpc" + "go.etcd.io/etcd/api/v3/authpb" pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "google.golang.org/grpc" ) type ( diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go index f7aa65a0a7298..24f5988986d35 100644 --- a/vendor/go.etcd.io/etcd/client/v3/client.go +++ b/vendor/go.etcd.io/etcd/client/v3/client.go @@ -18,22 +18,26 @@ import ( "context" "errors" "fmt" - "strconv" "strings" "sync" "time" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/v3/credentials" - "go.etcd.io/etcd/client/v3/internal/endpoint" - "go.etcd.io/etcd/client/v3/internal/resolver" + "github.com/coreos/go-semver/semver" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" grpccredentials "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" + + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/api/v3/version" + "go.etcd.io/etcd/client/pkg/v3/logutil" + "go.etcd.io/etcd/client/pkg/v3/verify" + "go.etcd.io/etcd/client/v3/credentials" + "go.etcd.io/etcd/client/v3/internal/endpoint" + "go.etcd.io/etcd/client/v3/internal/resolver" ) var ( @@ -55,7 +59,9 @@ type Client struct { cfg Config creds grpccredentials.TransportCredentials resolver *resolver.EtcdManualResolver - mu *sync.RWMutex + + epMu *sync.RWMutex + endpoints []string ctx context.Context cancel context.CancelFunc @@ -64,7 +70,7 @@ type Client struct { Username string // Password is a password for authentication. Password string - authTokenBundle credentials.Bundle + authTokenBundle credentials.PerRPCCredentialsBundle callOpts []grpc.CallOption @@ -86,7 +92,7 @@ func New(cfg Config) (*Client, error) { // service interface implementations and do not need connection management. func NewCtxClient(ctx context.Context, opts ...Option) *Client { cctx, cancel := context.WithCancel(ctx) - c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex), mu: new(sync.RWMutex)} + c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex), epMu: new(sync.RWMutex)} for _, opt := range opts { opt(c) } @@ -161,18 +167,18 @@ func (c *Client) Ctx() context.Context { return c.ctx } // Endpoints lists the registered endpoints for the client. 
func (c *Client) Endpoints() []string { // copy the slice; protect original endpoints from being changed - c.mu.RLock() - defer c.mu.RUnlock() - eps := make([]string, len(c.cfg.Endpoints)) - copy(eps, c.cfg.Endpoints) + c.epMu.RLock() + defer c.epMu.RUnlock() + eps := make([]string, len(c.endpoints)) + copy(eps, c.endpoints) return eps } // SetEndpoints updates client's endpoints. func (c *Client) SetEndpoints(eps ...string) { - c.mu.Lock() - defer c.mu.Unlock() - c.cfg.Endpoints = eps + c.epMu.Lock() + defer c.epMu.Unlock() + c.endpoints = eps c.resolver.SetEndpoints(eps) } @@ -189,7 +195,15 @@ func (c *Client) Sync(ctx context.Context) error { eps = append(eps, m.ClientURLs...) } } + // The linearizable `MemberList` returned successfully, so the + // endpoints shouldn't be empty. + verify.Verify(func() { + if len(eps) == 0 { + panic("empty endpoints returned from etcd cluster") + } + }) c.SetEndpoints(eps...) + c.lg.Debug("set etcd endpoints by autoSync", zap.Strings("endpoints", eps)) return nil } @@ -206,7 +220,7 @@ func (c *Client) autoSync() { ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) err := c.Sync(ctx) cancel() - if err != nil && err != c.ctx.Err() { + if err != nil && !errors.Is(err, c.ctx.Err()) { c.lg.Info("Auto sync endpoints failed.", zap.Error(err)) } } @@ -214,7 +228,9 @@ func (c *Client) autoSync() { } // dialSetupOpts gives the dial opts prior to any authentication. -func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) { +func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) []grpc.DialOption { + var opts []grpc.DialOption + if c.cfg.DialKeepAliveTime > 0 { params := keepalive.ClientParameters{ Time: c.cfg.DialKeepAliveTime, @@ -228,7 +244,7 @@ func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts if creds != nil { opts = append(opts, grpc.WithTransportCredentials(creds)) } else { - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } unaryMaxRetries := defaultUnaryMaxRetries @@ -257,7 +273,7 @@ func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(unaryMaxRetries), rrBackoff)), ) - return opts, nil + return opts } // Dial connects to a single endpoint using the client's config. @@ -278,7 +294,7 @@ func (c *Client) getToken(ctx context.Context) error { resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password) if err != nil { - if err == rpctypes.ErrAuthNotEnabled { + if errors.Is(err, rpctypes.ErrAuthNotEnabled) { c.authTokenBundle.UpdateAuthToken("") return nil } @@ -298,10 +314,8 @@ func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, e // dial configures and dials any grpc balancer target. func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts, err := c.dialSetupOpts(creds, dopts...) - if err != nil { - return nil, fmt.Errorf("failed to configure dialer: %v", err) - } + opts := c.dialSetupOpts(creds, dopts...) + if c.authTokenBundle != nil { opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials())) } @@ -314,7 +328,7 @@ func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc. 
dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options? } - target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.Endpoints()[0])) + target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.endpoints[0])) conn, err := grpc.DialContext(dctx, target, opts...) if err != nil { return nil, err @@ -339,15 +353,15 @@ func authority(endpoint string) string { func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials { r := endpoint.RequiresCredentials(ep) switch r { - case endpoint.CREDS_DROP: + case endpoint.CredsDrop: return nil - case endpoint.CREDS_OPTIONAL: + case endpoint.CredsOptional: return c.creds - case endpoint.CREDS_REQUIRE: + case endpoint.CredsRequire: if c.creds != nil { return c.creds } - return credentials.NewBundle(credentials.Config{}).TransportCredentials() + return credentials.NewTransportCredential(nil) default: panic(fmt.Errorf("unsupported CredsRequirement: %v", r)) } @@ -359,7 +373,7 @@ func newClient(cfg *Config) (*Client, error) { } var creds grpccredentials.TransportCredentials if cfg.TLS != nil { - creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials() + creds = credentials.NewTransportCredential(cfg.TLS) } // use a temporary skeleton client to bootstrap first connection @@ -375,7 +389,7 @@ func newClient(cfg *Config) (*Client, error) { creds: creds, ctx: ctx, cancel: cancel, - mu: new(sync.RWMutex), + epMu: new(sync.RWMutex), callOpts: defaultCallOpts, lgMu: new(sync.RWMutex), } @@ -398,7 +412,7 @@ func newClient(cfg *Config) (*Client, error) { if cfg.Username != "" && cfg.Password != "" { client.Username = cfg.Username client.Password = cfg.Password - client.authTokenBundle = credentials.NewBundle(credentials.Config{}) + client.authTokenBundle = credentials.NewPerRPCCredentialBundle() } if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { @@ -422,8 +436,10 @@ func newClient(cfg *Config) (*Client, error) { if len(cfg.Endpoints) < 1 { client.cancel() - return nil, fmt.Errorf("at least one Endpoint is required in client config") + return nil, errors.New("at least one Endpoint is required in client config") } + client.SetEndpoints(cfg.Endpoints...) + // Use a provided endpoint target so that for https:// without any tls config given, then // grpc will assume the certificate server name is the endpoint host. conn, err := client.dialWithBalancer() @@ -442,7 +458,7 @@ func newClient(cfg *Config) (*Client, error) { client.Auth = NewAuth(client) client.Maintenance = NewMaintenance(client) - //get token with established connection + // get token with established connection ctx, cancel = client.ctx, func() {} if client.cfg.DialTimeout > 0 { ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout) @@ -451,7 +467,7 @@ func newClient(cfg *Config) (*Client, error) { if err != nil { client.Close() cancel() - //TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err) + // TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err) return nil, err } cancel() @@ -483,6 +499,22 @@ func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFracti } } +// minSupportedVersion returns the minimum version supported, which is the previous minor release. 
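The version gate introduced here (minSupportedVersion, whose body follows, and the reworked checkVersion) compares `semver.Version` values instead of hand-split strings. A sketch of the comparison with illustrative version literals:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// Parse a server-reported version; Must panics on malformed input.
	vs := semver.Must(semver.NewVersion("3.5.12"))

	// Compare against a major.minor floor, as checkVersion does.
	minSupported := semver.Version{Major: 3, Minor: 4}
	fmt.Println(vs.LessThan(minSupported)) // false: 3.5.12 satisfies >= 3.4
}
```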
+func minSupportedVersion() *semver.Version { + ver := semver.Must(semver.NewVersion(version.Version)) + // consider only major and minor version + ver = &semver.Version{Major: ver.Major, Minor: ver.Minor} + for i := range version.AllVersions { + if version.AllVersions[i].Equal(*ver) { + if i == 0 { + return ver + } + return &version.AllVersions[i-1] + } + } + panic("current version is not in the version list") +} + func (c *Client) checkVersion() (err error) { var wg sync.WaitGroup @@ -504,20 +536,13 @@ func (c *Client) checkVersion() (err error) { errc <- rerr return } - vs := strings.Split(resp.Version, ".") - maj, min := 0, 0 - if len(vs) >= 2 { - var serr error - if maj, serr = strconv.Atoi(vs[0]); serr != nil { - errc <- serr - return - } - if min, serr = strconv.Atoi(vs[1]); serr != nil { - errc <- serr - return - } + vs, serr := semver.NewVersion(resp.Version) + if serr != nil { + errc <- serr + return } - if maj < 3 || (maj == 3 && min < 4) { + + if vs.LessThan(*minSupportedVersion()) { rerr = ErrOldCluster } errc <- rerr @@ -580,7 +605,8 @@ func ContextError(ctx context.Context, err error) error { return nil } err = rpctypes.Error(err) - if _, ok := err.(rpctypes.EtcdError); ok { + var serverErr rpctypes.EtcdError + if errors.As(err, &serverErr) { return err } if ev, ok := status.FromError(err); ok { @@ -602,7 +628,7 @@ func canceledByCaller(stopCtx context.Context, err error) bool { return false } - return err == context.Canceled || err == context.DeadlineExceeded + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) } // IsConnCanceled returns true, if error is from a closed gRPC connection. @@ -620,7 +646,7 @@ func IsConnCanceled(err error) bool { } // >= gRPC v1.10.x - if err == context.Canceled { + if errors.Is(err, context.Canceled) { return true } diff --git a/vendor/go.etcd.io/etcd/client/v3/cluster.go b/vendor/go.etcd.io/etcd/client/v3/cluster.go index 1815c1c964085..1b7e83375c30d 100644 --- a/vendor/go.etcd.io/etcd/client/v3/cluster.go +++ b/vendor/go.etcd.io/etcd/client/v3/cluster.go @@ -17,10 +17,10 @@ package clientv3 import ( "context" + "google.golang.org/grpc" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/types" - - "google.golang.org/grpc" ) type ( @@ -34,7 +34,7 @@ type ( type Cluster interface { // MemberList lists the current cluster membership. - MemberList(ctx context.Context) (*MemberListResponse, error) + MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error) // MemberAdd adds a new member into the cluster. MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) @@ -122,9 +122,9 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin return nil, ContextError(ctx, err) } -func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { - // it is safe to retry on list. - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: true}, c.callOpts...) +func (c *cluster) MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error) { + opt := OpGet("", opts...) + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: !opt.serializable}, c.callOpts...) 
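The `Linearizable: !opt.serializable` mapping above means MemberList callers can now trade freshness for latency. A hedged caller sketch (assumes a connected client `cli`):

```go
package example

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func listMembers(cli *clientv3.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// WithSerializable sets Linearizable=false on the wire: lower
	// latency, but the answer may lag behind the latest membership.
	resp, err := cli.MemberList(ctx, clientv3.WithSerializable())
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Printf("%x %s\n", m.ID, m.Name)
	}
	return nil
}
```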
if err == nil { return (*MemberListResponse)(resp), nil } diff --git a/vendor/go.etcd.io/etcd/client/v3/compare.go b/vendor/go.etcd.io/etcd/client/v3/compare.go index e2967cf38ed3d..663fdb4d2064e 100644 --- a/vendor/go.etcd.io/etcd/client/v3/compare.go +++ b/vendor/go.etcd.io/etcd/client/v3/compare.go @@ -18,8 +18,10 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) -type CompareTarget int -type CompareResult int +type ( + CompareTarget int + CompareResult int +) const ( CompareVersion CompareTarget = iota @@ -30,7 +32,7 @@ const ( type Cmp pb.Compare -func Compare(cmp Cmp, result string, v interface{}) Cmp { +func Compare(cmp Cmp, result string, v any) Cmp { var r pb.Compare_CompareResult switch result { @@ -120,7 +122,7 @@ func (cmp Cmp) WithPrefix() Cmp { } // mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. -func mustInt64(val interface{}) int64 { +func mustInt64(val any) int64 { if v, ok := val.(int64); ok { return v } @@ -132,7 +134,7 @@ func mustInt64(val interface{}) int64 { // mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an // int64 otherwise. -func mustInt64orLeaseID(val interface{}) int64 { +func mustInt64orLeaseID(val any) int64 { if v, ok := val.(LeaseID); ok { return int64(v) } diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/election.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/election.go index 31e93d242809b..ac1303dd8b48c 100644 --- a/vendor/go.etcd.io/etcd/client/v3/concurrency/election.go +++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/election.go @@ -90,7 +90,7 @@ func (e *Election) Campaign(ctx context.Context, val string) error { } } - _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) + err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) if err != nil { // clean up in case of context cancel select { diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/key.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/key.go index 20825950f307b..92e365c47151a 100644 --- a/vendor/go.etcd.io/etcd/client/v3/concurrency/key.go +++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/key.go @@ -16,9 +16,8 @@ package concurrency import ( "context" - "fmt" + "errors" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/mvccpb" v3 "go.etcd.io/etcd/client/v3" ) @@ -42,24 +41,24 @@ func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) e if err := ctx.Err(); err != nil { return err } - return fmt.Errorf("lost watcher waiting for delete") + return errors.New("lost watcher waiting for delete") } // waitDeletes efficiently waits until all keys matching the prefix and no greater -// than the create revision. -func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { +// than the create revision are deleted. +func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) error { getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) for { resp, err := client.Get(ctx, pfx, getOpts...) 
if err != nil { - return nil, err + return err } if len(resp.Kvs) == 0 { - return resp.Header, nil + return nil } lastKey := string(resp.Kvs[0].Key) if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { - return nil, err + return err } } } diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go index c3800d6282ab1..6898bbcec4144 100644 --- a/vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go +++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "strings" "sync" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -25,8 +26,11 @@ import ( ) // ErrLocked is returned by TryLock when Mutex is already locked by another session. -var ErrLocked = errors.New("mutex: Locked by another session") -var ErrSessionExpired = errors.New("mutex: session is expired") +var ( + ErrLocked = errors.New("mutex: Locked by another session") + ErrSessionExpired = errors.New("mutex: session is expired") + ErrLockReleased = errors.New("mutex: lock has already been released") +) // Mutex implements the sync Locker interface with etcd type Mutex struct { @@ -82,7 +86,7 @@ func (m *Mutex) Lock(ctx context.Context) error { client := m.s.Client() // wait for deletion revisions prior to myKey // TODO: early termination if the session key is deleted before other session keys with smaller revisions. - _, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) + werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) // release lock key if wait failed if werr != nil { m.Unlock(client.Ctx()) @@ -128,6 +132,14 @@ func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) { } func (m *Mutex) Unlock(ctx context.Context) error { + if m.myKey == "" || m.myRev <= 0 || m.myKey == "\x00" { + return ErrLockReleased + } + + if !strings.HasPrefix(m.myKey, m.pfx) { + return fmt.Errorf("invalid key %q, it should have prefix %q", m.myKey, m.pfx) + } + client := m.s.Client() if _, err := client.Delete(ctx, m.myKey); err != nil { return err @@ -154,6 +166,7 @@ func (lm *lockerMutex) Lock() { panic(err) } } + func (lm *lockerMutex) Unlock() { client := lm.s.Client() if err := lm.Mutex.Unlock(client.Ctx()); err != nil { diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/session.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/session.go index 7143cc47471c0..2275e96c972cc 100644 --- a/vendor/go.etcd.io/etcd/client/v3/concurrency/session.go +++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/session.go @@ -18,6 +18,8 @@ import ( "context" "time" + "go.uber.org/zap" + v3 "go.etcd.io/etcd/client/v3" ) @@ -30,15 +32,17 @@ type Session struct { opts *sessionOptions id v3.LeaseID + ctx context.Context cancel context.CancelFunc donec <-chan struct{} } // NewSession gets the leased session for a client. 
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { + lg := client.GetLogger() ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} for _, opt := range opts { - opt(ops) + opt(ops, lg) } id := ops.leaseID @@ -58,11 +62,14 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { } donec := make(chan struct{}) - s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} + s := &Session{client: client, opts: ops, id: id, ctx: ctx, cancel: cancel, donec: donec} // keep the lease alive until client error or cancelled context go func() { - defer close(donec) + defer func() { + close(donec) + cancel() + }() for range keepAlive { // eat messages until keep alive channel closes } @@ -79,6 +86,12 @@ func (s *Session) Client() *v3.Client { // Lease is the lease ID for keys bound to the session. func (s *Session) Lease() v3.LeaseID { return s.id } +// Ctx is the context attached to the session, it is canceled when the lease is orphaned, expires, or +// is otherwise no longer being refreshed. +func (s *Session) Ctx() context.Context { + return s.ctx +} + // Done returns a channel that closes when the lease is orphaned, expires, or // is otherwise no longer being refreshed. func (s *Session) Done() <-chan struct{} { return s.donec } @@ -108,14 +121,16 @@ type sessionOptions struct { } // SessionOption configures Session. -type SessionOption func(*sessionOptions) +type SessionOption func(*sessionOptions, *zap.Logger) // WithTTL configures the session's TTL in seconds. // If TTL is <= 0, the default 60 seconds TTL will be used. func WithTTL(ttl int) SessionOption { - return func(so *sessionOptions) { + return func(so *sessionOptions, lg *zap.Logger) { if ttl > 0 { so.ttl = ttl + } else { + lg.Warn("WithTTL(): TTL should be > 0, preserving current TTL", zap.Int64("current-session-ttl", int64(so.ttl))) } } } @@ -124,7 +139,7 @@ func WithTTL(ttl int) SessionOption { // This is useful in process restart scenario, for example, to reclaim // leadership from an election prior to restart. func WithLease(leaseID v3.LeaseID) SessionOption { - return func(so *sessionOptions) { + return func(so *sessionOptions, _ *zap.Logger) { so.leaseID = leaseID } } @@ -135,7 +150,7 @@ func WithLease(leaseID v3.LeaseID) SessionOption { // context is canceled before Close() completes, the session's lease will be // abandoned and left to expire instead of being revoked. 
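Since session options now receive the client logger (so the WithTTL misuse above can warn) and Mutex.Unlock rejects double release, a typical lock flow looks like this (a sketch, assuming a reachable cluster):

```go
package example

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func withLock(cli *clientv3.Client, do func() error) error {
	// TTL is in seconds; a non-positive value now logs a warning and
	// keeps the default instead of being silently ignored.
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(30))
	if err != nil {
		return err
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/locks/demo")
	if err := m.Lock(context.TODO()); err != nil {
		return err
	}
	// A second Unlock would return ErrLockReleased.
	defer m.Unlock(context.TODO())

	return do()
}
```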
func WithContext(ctx context.Context) SessionOption { - return func(so *sessionOptions) { + return func(so *sessionOptions, _ *zap.Logger) { so.ctx = ctx } } diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go index ba7303d09779c..49a82181db700 100644 --- a/vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go +++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go @@ -303,6 +303,10 @@ type stmSerializable struct { } func (s *stmSerializable) Get(keys ...string) string { + if len(keys) == 0 { + return "" + } + if wv := s.wset.get(keys...); wv != nil { return wv.val } diff --git a/vendor/go.etcd.io/etcd/client/v3/config.go b/vendor/go.etcd.io/etcd/client/v3/config.go index 6f5b41978d316..8351828d2f90c 100644 --- a/vendor/go.etcd.io/etcd/client/v3/config.go +++ b/vendor/go.etcd.io/etcd/client/v3/config.go @@ -21,6 +21,8 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" + + "go.etcd.io/etcd/client/pkg/v3/transport" ) type Config struct { @@ -52,7 +54,7 @@ type Config struct { // If 0, it defaults to "math.MaxInt32", because range response can // easily exceed request send limits. // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. - // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + // ("--max-recv-bytes" flag to etcd). MaxCallRecvMsgSize int // TLS holds the client secure credentials, if any. @@ -99,3 +101,128 @@ type Config struct { // TODO: support custom balancer picker } + +// ConfigSpec is the configuration from users, which comes from command-line flags, +// environment variables or config file. It is a fully declarative configuration, +// and can be serialized & deserialized to/from JSON. +type ConfigSpec struct { + Endpoints []string `json:"endpoints"` + RequestTimeout time.Duration `json:"request-timeout"` + DialTimeout time.Duration `json:"dial-timeout"` + KeepAliveTime time.Duration `json:"keepalive-time"` + KeepAliveTimeout time.Duration `json:"keepalive-timeout"` + MaxCallSendMsgSize int `json:"max-request-bytes"` + MaxCallRecvMsgSize int `json:"max-recv-bytes"` + Secure *SecureConfig `json:"secure"` + Auth *AuthConfig `json:"auth"` +} + +type SecureConfig struct { + Cert string `json:"cert"` + Key string `json:"key"` + Cacert string `json:"cacert"` + ServerName string `json:"server-name"` + + InsecureTransport bool `json:"insecure-transport"` + InsecureSkipVerify bool `json:"insecure-skip-tls-verify"` +} + +type AuthConfig struct { + Username string `json:"username"` + Password string `json:"password"` +} + +func (cs *ConfigSpec) Clone() *ConfigSpec { + if cs == nil { + return nil + } + + clone := *cs + + if len(cs.Endpoints) > 0 { + clone.Endpoints = make([]string, len(cs.Endpoints)) + copy(clone.Endpoints, cs.Endpoints) + } + + if cs.Secure != nil { + clone.Secure = &SecureConfig{} + *clone.Secure = *cs.Secure + } + if cs.Auth != nil { + clone.Auth = &AuthConfig{} + *clone.Auth = *cs.Auth + } + + return &clone +} + +func (cfg AuthConfig) Empty() bool { + return cfg.Username == "" && cfg.Password == "" +} + +// NewClientConfig creates a Config based on the provided ConfigSpec. 
+func NewClientConfig(confSpec *ConfigSpec, lg *zap.Logger) (*Config, error) { + tlsCfg, err := newTLSConfig(confSpec.Secure, lg) + if err != nil { + return nil, err + } + + cfg := &Config{ + Endpoints: confSpec.Endpoints, + DialTimeout: confSpec.DialTimeout, + DialKeepAliveTime: confSpec.KeepAliveTime, + DialKeepAliveTimeout: confSpec.KeepAliveTimeout, + MaxCallSendMsgSize: confSpec.MaxCallSendMsgSize, + MaxCallRecvMsgSize: confSpec.MaxCallRecvMsgSize, + TLS: tlsCfg, + } + + if confSpec.Auth != nil { + cfg.Username = confSpec.Auth.Username + cfg.Password = confSpec.Auth.Password + } + + return cfg, nil +} + +func newTLSConfig(scfg *SecureConfig, lg *zap.Logger) (*tls.Config, error) { + var ( + tlsCfg *tls.Config + err error + ) + + if scfg == nil { + return nil, nil + } + + if scfg.Cert != "" || scfg.Key != "" || scfg.Cacert != "" || scfg.ServerName != "" { + cfgtls := &transport.TLSInfo{ + CertFile: scfg.Cert, + KeyFile: scfg.Key, + TrustedCAFile: scfg.Cacert, + ServerName: scfg.ServerName, + Logger: lg, + } + if tlsCfg, err = cfgtls.ClientConfig(); err != nil { + return nil, err + } + } + + // If key/cert is not given but user wants secure connection, we + // should still setup an empty tls configuration for gRPC to setup + // secure connection. + if tlsCfg == nil && !scfg.InsecureTransport { + tlsCfg = &tls.Config{} + } + + // If the user wants to skip TLS verification then we should set + // the InsecureSkipVerify flag in tls configuration. + if scfg.InsecureSkipVerify { + if tlsCfg == nil { + tlsCfg = &tls.Config{} + } + tlsCfg.InsecureSkipVerify = scfg.InsecureSkipVerify + } + + return tlsCfg, nil +} diff --git a/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go b/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go index 42f688eb359c4..a2d8b45e86ab6 100644 --- a/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go +++ b/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go @@ -19,92 +19,51 @@ package credentials import ( "context" "crypto/tls" - "net" "sync" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" grpccredentials "google.golang.org/grpc/credentials" + + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) -// Config defines gRPC credential configuration. -type Config struct { - TLSConfig *tls.Config +func NewTransportCredential(cfg *tls.Config) grpccredentials.TransportCredentials { + return grpccredentials.NewTLS(cfg) } -// Bundle defines gRPC credential interface. -type Bundle interface { - grpccredentials.Bundle +// PerRPCCredentialsBundle defines gRPC credential interface. +type PerRPCCredentialsBundle interface { UpdateAuthToken(token string) + PerRPCCredentials() grpccredentials.PerRPCCredentials } -// NewBundle constructs a new gRPC credential bundle. -func NewBundle(cfg Config) Bundle { - return &bundle{ - tc: newTransportCredential(cfg.TLSConfig), - rc: newPerRPCCredential(), +func NewPerRPCCredentialBundle() PerRPCCredentialsBundle { + return &perRPCCredentialBundle{ + rc: &perRPCCredential{}, } } -// bundle implements "grpccredentials.Bundle" interface. -type bundle struct { - tc *transportCredential +// perRPCCredentialBundle implements `PerRPCCredentialsBundle` interface. 
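NewClientConfig above is the bridge from the declarative ConfigSpec to a dialable Config, building TLS settings via newTLSConfig. A minimal sketch (the endpoint and nop logger are illustrative):

```go
package example

import (
	"time"

	"go.uber.org/zap"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func clientFromSpec() (*clientv3.Client, error) {
	spec := &clientv3.ConfigSpec{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 2 * time.Second,
		// InsecureTransport=true leaves TLS nil, so the client dials
		// plain gRPC; with certs set, newTLSConfig builds a tls.Config.
		Secure: &clientv3.SecureConfig{InsecureTransport: true},
	}

	cfg, err := clientv3.NewClientConfig(spec, zap.NewNop())
	if err != nil {
		return nil, err
	}
	return clientv3.New(*cfg)
}
```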
+type perRPCCredentialBundle struct { rc *perRPCCredential } -func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials { - return b.tc -} - -func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials { - return b.rc -} - -func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) { - // no-op - return nil, nil -} - -// transportCredential implements "grpccredentials.TransportCredentials" interface. -type transportCredential struct { - gtc grpccredentials.TransportCredentials -} - -func newTransportCredential(cfg *tls.Config) *transportCredential { - return &transportCredential{ - gtc: grpccredentials.NewTLS(cfg), - } -} - -func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { - return tc.gtc.ClientHandshake(ctx, authority, rawConn) -} - -func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) { - return tc.gtc.ServerHandshake(rawConn) -} - -func (tc *transportCredential) Info() grpccredentials.ProtocolInfo { - return tc.gtc.Info() -} - -func (tc *transportCredential) Clone() grpccredentials.TransportCredentials { - return &transportCredential{ - gtc: tc.gtc.Clone(), +func (b *perRPCCredentialBundle) UpdateAuthToken(token string) { + if b.rc == nil { + return } + b.rc.UpdateAuthToken(token) } -func (tc *transportCredential) OverrideServerName(serverNameOverride string) error { - return tc.gtc.OverrideServerName(serverNameOverride) +func (b *perRPCCredentialBundle) PerRPCCredentials() grpccredentials.PerRPCCredentials { + return b.rc } -// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface. +// perRPCCredential implements `grpccredentials.PerRPCCredentials` interface. 
type perRPCCredential struct { authToken string authTokenMu sync.RWMutex } -func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} } - func (rc *perRPCCredential) RequireTransportSecurity() bool { return false } func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { @@ -117,13 +76,6 @@ func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil } -func (b *bundle) UpdateAuthToken(token string) { - if b.rc == nil { - return - } - b.rc.UpdateAuthToken(token) -} - func (rc *perRPCCredential) UpdateAuthToken(token string) { rc.authTokenMu.Lock() rc.authToken = token diff --git a/vendor/go.etcd.io/etcd/client/v3/ctx.go b/vendor/go.etcd.io/etcd/client/v3/ctx.go index 56b69cf2ede8a..38cee6c27e4a0 100644 --- a/vendor/go.etcd.io/etcd/client/v3/ctx.go +++ b/vendor/go.etcd.io/etcd/client/v3/ctx.go @@ -17,9 +17,10 @@ package clientv3 import ( "context" + "google.golang.org/grpc/metadata" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/api/v3/version" - "google.golang.org/grpc/metadata" ) // WithRequireLeader requires client requests to only succeed diff --git a/vendor/go.etcd.io/etcd/client/v3/doc.go b/vendor/go.etcd.io/etcd/client/v3/doc.go index fd61aff117aaf..bd820d3d79efd 100644 --- a/vendor/go.etcd.io/etcd/client/v3/doc.go +++ b/vendor/go.etcd.io/etcd/client/v3/doc.go @@ -47,8 +47,8 @@ // To specify a client request timeout, wrap the context with context.WithTimeout: // // ctx, cancel := context.WithTimeout(context.Background(), timeout) +// defer cancel() // resp, err := kvc.Put(ctx, "sample_key", "sample_value") -// cancel() // if err != nil { // // handle error! // } @@ -61,8 +61,7 @@ // // 1. context error: canceled or deadline exceeded. // 2. gRPC error: e.g. when clock drifts in server-side before client's context deadline exceeded. -// -// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go +// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go // // Here is the example code to handle client errors: // diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go index 35a3fe8c33749..2c45b5e3067bd 100644 --- a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go +++ b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go @@ -25,12 +25,12 @@ import ( type CredsRequirement int const ( - // CREDS_REQUIRE - Credentials/certificate required for thi type of connection. - CREDS_REQUIRE CredsRequirement = iota - // CREDS_DROP - Credentials/certificate not needed and should get ignored. - CREDS_DROP - // CREDS_OPTIONAL - Credentials/certificate might be used if supplied - CREDS_OPTIONAL + // CredsRequire - Credentials/certificate required for this type of connection. + CredsRequire CredsRequirement = iota + // CredsDrop - Credentials/certificate not needed and should get ignored.
+ CredsDrop + // CredsOptional - Credentials/certificate might be used if supplied + CredsOptional ) func extractHostFromHostPort(ep string) string { @@ -54,20 +54,20 @@ func mustSplit2(s, sep string) (string, string) { func schemeToCredsRequirement(schema string) CredsRequirement { switch schema { case "https", "unixs": - return CREDS_REQUIRE + return CredsRequire case "http": - return CREDS_DROP + return CredsDrop case "unix": // Preserving previous behavior from: // https://github.com/etcd-io/etcd/blob/dae29bb719dd69dc119146fc297a0628fcc1ccf8/client/v3/client.go#L212 // that likely was a bug due to missing 'fallthrough'. // At the same time it seems legit to let the users decide whether they // want credential control or not (and 'unixs' schema is not a standard thing). - return CREDS_OPTIONAL + return CredsOptional case "": - return CREDS_OPTIONAL + return CredsOptional default: - return CREDS_OPTIONAL + return CredsOptional } } @@ -106,7 +106,7 @@ func translateEndpoint(ep string) (addr string, serverName string, requireCreds if strings.Contains(ep, "://") { url, err := url.Parse(ep) if err != nil { - return ep, ep, CREDS_OPTIONAL + return ep, ep, CredsOptional } if url.Scheme == "http" || url.Scheme == "https" { return url.Host, url.Host, schemeToCredsRequirement(url.Scheme) @@ -114,7 +114,7 @@ func translateEndpoint(ep string) (addr string, serverName string, requireCreds return ep, url.Host, schemeToCredsRequirement(url.Scheme) } // Handles plain addresses like 10.0.0.44:437. - return ep, ep, CREDS_OPTIONAL + return ep, ep, CredsOptional } // RequiresCredentials returns whether given endpoint requires diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go b/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go index 3ee3cb8e2bb9c..b5c9de00786e9 100644 --- a/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go +++ b/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go @@ -15,10 +15,11 @@ package resolver import ( - "go.etcd.io/etcd/client/v3/internal/endpoint" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" + + "go.etcd.io/etcd/client/v3/internal/endpoint" ) const ( diff --git a/vendor/go.etcd.io/etcd/client/v3/kv.go b/vendor/go.etcd.io/etcd/client/v3/kv.go index be5b508dd6152..8d0c595d1e43d 100644 --- a/vendor/go.etcd.io/etcd/client/v3/kv.go +++ b/vendor/go.etcd.io/etcd/client/v3/kv.go @@ -17,9 +17,10 @@ package clientv3 import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) type ( @@ -79,12 +80,15 @@ func (op OpResponse) Txn() *TxnResponse { return op.txn } func (resp *PutResponse) OpResponse() OpResponse { return OpResponse{put: resp} } + func (resp *GetResponse) OpResponse() OpResponse { return OpResponse{get: resp} } + func (resp *DeleteResponse) OpResponse() OpResponse { return OpResponse{del: resp} } + func (resp *TxnResponse) OpResponse() OpResponse { return OpResponse{txn: resp} } @@ -145,10 +149,14 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { case tRange: - var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) - if err == nil { - return OpResponse{get: (*GetResponse)(resp)}, nil + if op.IsSortOptionValid() { + var resp *pb.RangeResponse + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) 
+ if err == nil { + return OpResponse{get: (*GetResponse)(resp)}, nil + } + } else { + err = rpctypes.ErrInvalidSortOption } case tPut: var resp *pb.PutResponse diff --git a/vendor/go.etcd.io/etcd/client/v3/lease.go b/vendor/go.etcd.io/etcd/client/v3/lease.go index 4e7d1caf83125..bed24bc0dcef4 100644 --- a/vendor/go.etcd.io/etcd/client/v3/lease.go +++ b/vendor/go.etcd.io/etcd/client/v3/lease.go @@ -16,15 +16,16 @@ package clientv3 import ( "context" + "errors" "sync" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) type ( @@ -198,12 +199,12 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout keepAlives: make(map[LeaseID]*keepAlive), remote: remote, firstKeepAliveTimeout: keepAliveTimeout, - lg: c.lg, } if l.firstKeepAliveTimeout == time.Second { l.firstKeepAliveTimeout = defaultTTL } if c != nil { + l.lg = c.lg l.callOpts = c.callOpts } reqLeaderCtx := WithRequireLeader(context.Background()) @@ -409,9 +410,9 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKe } defer func() { - if err := stream.CloseSend(); err != nil { + if cerr := stream.CloseSend(); cerr != nil { if ferr == nil { - ferr = ContextError(ctx, err) + ferr = ContextError(ctx, cerr) } return } @@ -450,6 +451,9 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { for { stream, err := l.resetRecv() if err != nil { + l.lg.Warn("error occurred during lease keep alive loop", + zap.Error(err), + ) if canceledByCaller(l.stopCtx, err) { return err } @@ -461,7 +465,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { return err } - if ContextError(l.stopCtx, err) == rpctypes.ErrNoLeader { + if errors.Is(ContextError(l.stopCtx, err), rpctypes.ErrNoLeader) { l.closeRequireLeader() } break @@ -546,9 +550,12 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { // deadlineLoop reaps any keep alive channels that have not received a response // within the lease TTL func (l *lessor) deadlineLoop() { + timer := time.NewTimer(time.Second) + defer timer.Stop() for { + timer.Reset(time.Second) select { - case <-time.After(time.Second): + case <-timer.C: case <-l.donec: return } @@ -582,7 +589,9 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { for _, id := range tosend { r := &pb.LeaseKeepAliveRequest{ID: int64(id)} if err := stream.Send(r); err != nil { - // TODO do something with this error? + l.lg.Warn("error occurred during lease keep alive request sending", + zap.Error(err), + ) return } } diff --git a/vendor/go.etcd.io/etcd/client/v3/logger.go b/vendor/go.etcd.io/etcd/client/v3/logger.go index eaa35f2d3acb7..300363cd25bab 100644 --- a/vendor/go.etcd.io/etcd/client/v3/logger.go +++ b/vendor/go.etcd.io/etcd/client/v3/logger.go @@ -18,10 +18,11 @@ import ( "log" "os" - "go.etcd.io/etcd/client/pkg/v3/logutil" "go.uber.org/zap/zapcore" "go.uber.org/zap/zapgrpc" "google.golang.org/grpc/grpclog" + + "go.etcd.io/etcd/client/pkg/v3/logutil" ) func init() { @@ -52,7 +53,7 @@ func etcdClientDebugLevel() zapcore.Level { } var l zapcore.Level if err := l.Set(envLevel); err != nil { - log.Printf("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'") + log.Print("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. 
Using default level: 'info'") return zapcore.InfoLevel } return l diff --git a/vendor/go.etcd.io/etcd/client/v3/maintenance.go b/vendor/go.etcd.io/etcd/client/v3/maintenance.go index 71b28e6dc3f94..00aaacd15fdc2 100644 --- a/vendor/go.etcd.io/etcd/client/v3/maintenance.go +++ b/vendor/go.etcd.io/etcd/client/v3/maintenance.go @@ -16,12 +16,14 @@ package clientv3 import ( "context" + "errors" "fmt" "io" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.uber.org/zap" "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) type ( @@ -31,6 +33,15 @@ type ( StatusResponse pb.StatusResponse HashKVResponse pb.HashKVResponse MoveLeaderResponse pb.MoveLeaderResponse + DowngradeResponse pb.DowngradeResponse + + DowngradeAction pb.DowngradeRequest_DowngradeAction +) + +const ( + DowngradeValidate = DowngradeAction(pb.DowngradeRequest_VALIDATE) + DowngradeEnable = DowngradeAction(pb.DowngradeRequest_ENABLE) + DowngradeCancel = DowngradeAction(pb.DowngradeRequest_CANCEL) ) type Maintenance interface { @@ -57,14 +68,40 @@ type Maintenance interface { // is non-zero, the hash is computed on all keys at or below the given revision. HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) + // SnapshotWithVersion returns a reader for a point-in-time snapshot and the version of etcd that created it. + // If the context "ctx" is canceled or timed out, reading from returned + // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded). + SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) + // Snapshot provides a reader for a point-in-time snapshot of etcd. // If the context "ctx" is canceled or timed out, reading from returned // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded). + // Deprecated: use SnapshotWithVersion instead. Snapshot(ctx context.Context) (io.ReadCloser, error) // MoveLeader requests current leader to transfer its leadership to the transferee. // Request must be made to the leader. MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) + + // Downgrade requests a downgrade, verifies its feasibility, or cancels a + // downgrade of the cluster version. + // Supported since etcd 3.5. + Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) +} + +// SnapshotResponse is the aggregated response from the snapshot stream. +// Consumer is responsible for closing the stream by calling .Snapshot.Close() +type SnapshotResponse struct { + // Header is the first header in the snapshot stream; it has the current key-value store information + // and indicates the point in time of the snapshot. + Header *pb.ResponseHeader + // Snapshot exposes the io.ReadCloser interface for data stored in the Blob field in the snapshot stream. + Snapshot io.ReadCloser + // Version is the local version of the server that created the snapshot. + // In a cluster running mixed binary versions, each member can return a different result. + // Informs which etcd server version should be used when restoring the snapshot. + // Supported on etcd >= v3.6.
+ Version string } type maintenance struct { @@ -80,22 +117,10 @@ func NewMaintenance(c *Client) Maintenance { dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { conn, err := c.Dial(endpoint) if err != nil { - return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err) + return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %w", endpoint, err) } - //get token with established connection - dctx := c.ctx - cancel := func() {} - if c.cfg.DialTimeout > 0 { - dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) - } - err = c.getToken(dctx) - cancel() - if err != nil { - conn.Close() - return nil, nil, fmt.Errorf("failed to getToken from endpoint %s with maintenance client: %v", endpoint, err) - } - cancel = func() { conn.Close() } + cancel := func() { conn.Close() } return RetryMaintenanceClient(c, conn), cancel, nil }, remote: RetryMaintenanceClient(c, c.conn), @@ -108,7 +133,6 @@ func NewMaintenance(c *Client) Maintenance { func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { api := &maintenance{ - lg: c.lg, dial: func(string) (pb.MaintenanceClient, func(), error) { return remote, func() {}, nil }, @@ -116,6 +140,7 @@ func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) } if c != nil { api.callOpts = c.callOpts + api.lg = c.lg } return api } @@ -192,7 +217,6 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { remote, cancel, err := m.dial(endpoint) if err != nil { - return nil, ContextError(ctx, err) } defer cancel() @@ -203,7 +227,7 @@ func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (* return (*HashKVResponse)(resp), nil } -func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { +func (m *maintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) { ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) if err != nil { return nil, ContextError(ctx, err) @@ -211,27 +235,60 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { m.lg.Info("opened snapshot stream; downloading") pr, pw := io.Pipe() + + resp, err := ss.Recv() + if err != nil { + m.logAndCloseWithError(err, pw) + return nil, err + } go func() { + // Saving response is blocking + err := m.save(resp, pw) + if err != nil { + m.logAndCloseWithError(err, pw) + return + } for { - resp, err := ss.Recv() + sresp, err := ss.Recv() if err != nil { - switch err { - case io.EOF: - m.lg.Info("completed snapshot read; closing") - default: - m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err)) - } - pw.CloseWithError(err) + m.logAndCloseWithError(err, pw) return } - // can "resp == nil && err == nil" - // before we receive snapshot SHA digest? 
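The streaming logic around this point feeds both Snapshot and the new SnapshotWithVersion; a hedged sketch of consuming the latter:

```go
package example

import (
	"context"
	"fmt"
	"io"
	"os"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func saveSnapshot(cli *clientv3.Client, path string) error {
	resp, err := cli.SnapshotWithVersion(context.TODO())
	if err != nil {
		return err
	}
	// Caller owns the stream and must close it.
	defer resp.Snapshot.Close()

	// Version is populated by servers >= v3.6; older servers leave it "".
	fmt.Println("snapshot created by etcd", resp.Version)

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, resp.Snapshot)
	return err
}
```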
- // No, server sends EOF with an empty response - // after it sends SHA digest at the end + err = m.save(sresp, pw) + if err != nil { + m.logAndCloseWithError(err, pw) + return + } + } + }() - if _, werr := pw.Write(resp.Blob); werr != nil { - pw.CloseWithError(werr) + return &SnapshotResponse{ + Header: resp.GetHeader(), + Snapshot: &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, + Version: resp.GetVersion(), + }, nil +} + +func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) + if err != nil { + return nil, ContextError(ctx, err) + } + + m.lg.Info("opened snapshot stream; downloading") + pr, pw := io.Pipe() + + go func() { + for { + resp, err := ss.Recv() + if err != nil { + m.logAndCloseWithError(err, pw) + return + } + err = m.save(resp, pw) + if err != nil { + m.logAndCloseWithError(err, pw) return } } @@ -239,6 +296,28 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil } +func (m *maintenance) logAndCloseWithError(err error, pw *io.PipeWriter) { + switch { + case errors.Is(err, io.EOF): + m.lg.Info("completed snapshot read; closing") + default: + m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err)) + } + pw.CloseWithError(err) +} + +func (m *maintenance) save(resp *pb.SnapshotResponse, pw *io.PipeWriter) error { + // can "resp == nil && err == nil" + // before we receive snapshot SHA digest? + // No, server sends EOF with an empty response + // after it sends SHA digest at the end + + if _, werr := pw.Write(resp.Blob); werr != nil { + return werr + } + return nil +} + type snapshotReadCloser struct { ctx context.Context io.ReadCloser @@ -253,3 +332,19 @@ func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*Mov resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) return (*MoveLeaderResponse)(resp), ContextError(ctx, err) } + +func (m *maintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) { + var actionType pb.DowngradeRequest_DowngradeAction + switch action { + case DowngradeValidate: + actionType = pb.DowngradeRequest_VALIDATE + case DowngradeEnable: + actionType = pb.DowngradeRequest_ENABLE + case DowngradeCancel: + actionType = pb.DowngradeRequest_CANCEL + default: + return nil, errors.New("etcdclient: unknown downgrade action") + } + resp, err := m.remote.Downgrade(ctx, &pb.DowngradeRequest{Action: actionType, Version: version}, m.callOpts...) + return (*DowngradeResponse)(resp), ContextError(ctx, err) +} diff --git a/vendor/go.etcd.io/etcd/client/v3/op.go b/vendor/go.etcd.io/etcd/client/v3/op.go index 5251906322c9c..e19f2e5e44f85 100644 --- a/vendor/go.etcd.io/etcd/client/v3/op.go +++ b/vendor/go.etcd.io/etcd/client/v3/op.go @@ -124,6 +124,10 @@ func (op Op) IsKeysOnly() bool { return op.keysOnly } // IsCountOnly returns whether countOnly is set. func (op Op) IsCountOnly() bool { return op.countOnly } +func (op Op) IsOptsWithFromKey() bool { return op.isOptsWithFromKey } + +func (op Op) IsOptsWithPrefix() bool { return op.isOptsWithPrefix } + // MinModRev returns the operation's minimum modify revision. func (op Op) MinModRev() int64 { return op.minModRev } @@ -418,9 +422,15 @@ func WithFromKey() OpOption { } } -// WithSerializable makes 'Get' request serializable. By default, -// it's linearizable. 
Serializable requests are better for lower latency -// requirement. +// WithSerializable makes `Get` and `MemberList` requests serializable. +// By default, they are linearizable. Serializable requests are better +// for lower latency requirement, but users should be aware that they +// could get stale data with serializable requests. +// +// In some situations users may want to use serializable requests. For +// example, when adding a new member to a one-node cluster, it's reasonable +// and safe to use serializable request before the new added member gets +// started. func WithSerializable() OpOption { return func(op *Op) { op.serializable = true } } @@ -581,3 +591,19 @@ func IsOptsWithFromKey(opts []OpOption) bool { return ret.isOptsWithFromKey } + +func (op Op) IsSortOptionValid() bool { + if op.sort != nil { + sortOrder := int32(op.sort.Order) + sortTarget := int32(op.sort.Target) + + if _, ok := pb.RangeRequest_SortOrder_name[sortOrder]; !ok { + return false + } + + if _, ok := pb.RangeRequest_SortTarget_name[sortTarget]; !ok { + return false + } + } + return true +} diff --git a/vendor/go.etcd.io/etcd/client/v3/options.go b/vendor/go.etcd.io/etcd/client/v3/options.go index cdae1b16a2aa1..cc10a03d76d1c 100644 --- a/vendor/go.etcd.io/etcd/client/v3/options.go +++ b/vendor/go.etcd.io/etcd/client/v3/options.go @@ -23,7 +23,7 @@ import ( var ( // client-side handling retrying of request failures where data was not written to the wire or - // where server indicates it did not process the data. gRPC default is default is "WaitForReady(false)" + // where server indicates it did not process the data. gRPC default is "WaitForReady(false)" // but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to // transient failures. defaultWaitForReady = grpc.WaitForReady(true) diff --git a/vendor/go.etcd.io/etcd/client/v3/retry.go b/vendor/go.etcd.io/etcd/client/v3/retry.go index 69ecc63147197..9152c53a7d4c5 100644 --- a/vendor/go.etcd.io/etcd/client/v3/retry.go +++ b/vendor/go.etcd.io/etcd/client/v3/retry.go @@ -16,13 +16,14 @@ package clientv3 import ( "context" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "errors" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) type retryPolicy uint8 @@ -52,7 +53,8 @@ func (rp retryPolicy) String() string { // handle itself even with retries. func isSafeRetryImmutableRPC(err error) bool { eErr := rpctypes.Error(err) - if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { + var serverErr rpctypes.EtcdError + if errors.As(eErr, &serverErr) && serverErr.Code() != codes.Unavailable { // interrupted by non-transient server-side or gRPC-side error // client cannot handle itself (e.g. rpctypes.ErrCompacted) return false @@ -101,8 +103,9 @@ func RetryKVClient(c *Client) pb.KVClient { kc: pb.NewKVClient(c.conn), } } + func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rkv.kc.Range(ctx, in, append(opts, withRepeatablePolicy())...) 
} func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { @@ -133,23 +136,23 @@ func RetryLeaseClient(c *Client) pb.LeaseClient { } func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { - return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRepeatablePolicy())...) } func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) { - return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rlc.lc.LeaseLeases(ctx, in, append(opts, withRepeatablePolicy())...) } func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rlc.lc.LeaseGrant(ctx, in, append(opts, withRepeatablePolicy())...) } func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRepeatablePolicy())...) } func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { - return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...) + return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRepeatablePolicy())...) } type retryClusterClient struct { @@ -164,7 +167,7 @@ func RetryClusterClient(c *Client) pb.ClusterClient { } func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { - return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rcc.cc.MemberList(ctx, in, append(opts, withRepeatablePolicy())...) } func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { @@ -195,27 +198,27 @@ func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClie } func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { - return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.Alarm(ctx, in, append(opts, withRepeatablePolicy())...) } func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { - return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.Status(ctx, in, append(opts, withRepeatablePolicy())...) } func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { - return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.Hash(ctx, in, append(opts, withRepeatablePolicy())...) 
} func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) { - return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.HashKV(ctx, in, append(opts, withRepeatablePolicy())...) } func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { - return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.Snapshot(ctx, in, append(opts, withRepeatablePolicy())...) } func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) { - return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rmc.mc.MoveLeader(ctx, in, append(opts, withRepeatablePolicy())...) } func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { @@ -238,19 +241,19 @@ func RetryAuthClient(c *Client) pb.AuthClient { } func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { - return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rac.ac.UserList(ctx, in, append(opts, withRepeatablePolicy())...) } func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { - return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rac.ac.UserGet(ctx, in, append(opts, withRepeatablePolicy())...) } func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { - return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rac.ac.RoleGet(ctx, in, append(opts, withRepeatablePolicy())...) } func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { - return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...) + return rac.ac.RoleList(ctx, in, append(opts, withRepeatablePolicy())...) } func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { diff --git a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go index 8c50dcfa93d7a..2b9301a5808d4 100644 --- a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go +++ b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go @@ -24,12 +24,13 @@ import ( "sync" "time" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) // unaryClientInterceptor returns a new retrying unary client interceptor. @@ -38,7 +39,7 @@ import ( // changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). 
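As a compressed illustration of the contract described above, here is a minimal retrying unary interceptor and how such an interceptor is attached to a connection. This is a sketch, not etcd's implementation (which follows next and additionally applies backoff, retry policies, and auth-token refresh); the retryingUnary helper and the endpoint address are invented for the example.

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// retryingUnary retries the wrapped invoker up to maxAttempts times
// (maxAttempts must be >= 1 in this sketch), stopping early once the
// context is canceled or past its deadline.
func retryingUnary(maxAttempts uint) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		var lastErr error
		for attempt := uint(0); attempt < maxAttempts; attempt++ {
			if lastErr = invoker(ctx, method, req, reply, cc, opts...); lastErr == nil {
				return nil
			}
			if ctx.Err() != nil {
				return lastErr // canceled or deadline exceeded; give up
			}
		}
		return lastErr
	}
}

func main() {
	// The interceptor is installed once, when the connection is built.
	conn, err := grpc.Dial("127.0.0.1:2379",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithChainUnaryInterceptor(retryingUnary(3)),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}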
func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClientInterceptor { intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { ctx = withVersion(ctx) grpcOpts, retryOpts := filterCallOptions(opts) callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) @@ -54,6 +55,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien c.GetLogger().Debug( "retrying of unary invoker", zap.String("target", cc.Target()), + zap.String("method", method), zap.Uint("attempt", attempt), ) lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...) @@ -63,6 +65,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien c.GetLogger().Warn( "retrying of unary invoker failed", zap.String("target", cc.Target()), + zap.String("method", method), zap.Uint("attempt", attempt), zap.Error(lastErr), ) @@ -75,14 +78,14 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien continue } if c.shouldRefreshToken(lastErr, callOpts) { - gterr := c.refreshToken(ctx) - if gterr != nil { + gtErr := c.refreshToken(ctx) + if gtErr != nil { c.GetLogger().Warn( "retrying of unary invoker failed to fetch new auth token", zap.String("target", cc.Target()), - zap.Error(gterr), + zap.Error(gtErr), ) - return gterr // lastErr must be invalid auth token + return gtErr // lastErr must be invalid auth token } continue } @@ -106,15 +109,12 @@ func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamCli intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { ctx = withVersion(ctx) - // getToken automatically - // TODO(cfc4n): keep this code block, remove codes about getToken in client.go after pr #12165 merged. - if c.authTokenBundle != nil { - // equal to c.Username != "" && c.Password != "" - err := c.getToken(ctx) - if err != nil && rpctypes.Error(err) != rpctypes.ErrAuthNotEnabled { - c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err)) - return nil, err - } + // getToken automatically. Otherwise, auth token may be invalid after watch reconnection because the token has expired + // (see https://github.com/etcd-io/etcd/issues/11954 for more). + err := c.getToken(ctx) + if err != nil { + c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err)) + return nil, err } grpcOpts, retryOpts := filterCallOptions(opts) callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) @@ -146,14 +146,14 @@ func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamCli // shouldRefreshToken checks whether there's a need to refresh the token based on the error and callOptions, // and returns a boolean value. 
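A small standalone sketch of the classification that the function below performs, using the errors.Is idiom this diff migrates to. rpctypes.Error and the two auth sentinels appear in the surrounding hunks; rpctypes.ErrGRPCInvalidAuthToken is assumed here as the server-side counterpart used to exercise the conversion.

package main

import (
	"errors"
	"fmt"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)

// needsTokenRefresh mirrors the checks below: rpctypes.Error normalizes a
// gRPC status error into a typed etcd error, and errors.Is then matches it
// against the auth sentinels even if it was wrapped along the way.
func needsTokenRefresh(err error) bool {
	etcdErr := rpctypes.Error(err)
	return errors.Is(etcdErr, rpctypes.ErrInvalidAuthToken) ||
		errors.Is(etcdErr, rpctypes.ErrAuthOldRevision)
}

func main() {
	fmt.Println(needsTokenRefresh(rpctypes.ErrGRPCInvalidAuthToken)) // true
	fmt.Println(needsTokenRefresh(errors.New("some other failure"))) // false
}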
func (c *Client) shouldRefreshToken(err error, callOpts *options) bool { - if rpctypes.Error(err) == rpctypes.ErrUserEmpty { + if errors.Is(rpctypes.Error(err), rpctypes.ErrUserEmpty) { // refresh the token when username, password is present but the server returns ErrUserEmpty // which is possible when the client token is cleared somehow return c.authTokenBundle != nil // equal to c.Username != "" && c.Password != "" } return callOpts.retryAuth && - (rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken || rpctypes.Error(err) == rpctypes.ErrAuthOldRevision) + (errors.Is(rpctypes.Error(err), rpctypes.ErrInvalidAuthToken) || errors.Is(rpctypes.Error(err), rpctypes.ErrAuthOldRevision)) } func (c *Client) refreshToken(ctx context.Context) error { @@ -169,8 +169,7 @@ func (c *Client) refreshToken(ctx context.Context) error { // clients just need to retry the operations (e.g. Put, Delete etc). return nil } - // clear auth token before refreshing it. - c.authTokenBundle.UpdateAuthToken("") + return c.getToken(ctx) } @@ -180,9 +179,9 @@ func (c *Client) refreshToken(ctx context.Context) error { type serverStreamingRetryingStream struct { grpc.ClientStream client *Client - bufferedSends []interface{} // single message that the client can sen - receivedGood bool // indicates whether any prior receives were successful - wasClosedSend bool // indicates that CloseSend was closed + bufferedSends []any // single message that the client can send + receivedGood bool // indicates whether any prior receives were successful + wasClosedSend bool // indicates that CloseSend was called ctx context.Context callOpts *options streamerCall func(ctx context.Context) (grpc.ClientStream, error) @@ -201,7 +200,7 @@ func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { return s.ClientStream } -func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { +func (s *serverStreamingRetryingStream) SendMsg(m any) error { s.mu.Lock() s.bufferedSends = append(s.bufferedSends, m) s.mu.Unlock() @@ -223,7 +222,7 @@ func (s *serverStreamingRetryingStream) Trailer() metadata.MD { return s.getStream().Trailer() } -func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { +func (s *serverStreamingRetryingStream) RecvMsg(m any) error { attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) if !attemptRetry { return lastErr // success or hard failure @@ -250,12 +249,12 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { return lastErr } -func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m any) (bool, error) { s.mu.RLock() wasGood := s.receivedGood s.mu.RUnlock() err := s.getStream().RecvMsg(m) - if err == nil || err == io.EOF { + if err == nil || errors.Is(err, io.EOF) { s.mu.Lock() s.receivedGood = true s.mu.Unlock() @@ -272,13 +271,12 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{} return true, err } if s.client.shouldRefreshToken(err, s.callOpts) { - gterr := s.client.refreshToken(s.ctx) - if gterr != nil { - s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr)) + gtErr := s.client.refreshToken(s.ctx) + if gtErr != nil { + s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gtErr)) return false, err // return the original error for simplicity } return true, err - } return isSafeRetry(s.client, err, s.callOpts), err } @@ -312,7 +310,7 @@ func waitRetryBackoff(ctx context.Context, attempt
uint, callOpts *options) erro select { case <-ctx.Done(): timer.Stop() - return contextErrToGrpcErr(ctx.Err()) + return contextErrToGRPCErr(ctx.Err()) case <-timer.C: } } @@ -331,7 +329,7 @@ func isSafeRetry(c *Client, err error, callOpts *options) bool { // customer provides mix of learners (not yet voters) and voters with an // expectation to pick voter in the next attempt. // TODO: Ideally client should be 'aware' which endpoint represents: leader/voter/learner with high probability. - if errors.Is(err, rpctypes.ErrGPRCNotSupportedForLearner) && len(c.Endpoints()) > 1 { + if errors.Is(err, rpctypes.ErrGRPCNotSupportedForLearner) && len(c.Endpoints()) > 1 { return true } @@ -350,25 +348,23 @@ func isContextError(err error) bool { return status.Code(err) == codes.DeadlineExceeded || status.Code(err) == codes.Canceled } -func contextErrToGrpcErr(err error) error { - switch err { - case context.DeadlineExceeded: +func contextErrToGRPCErr(err error) error { + switch { + case errors.Is(err, context.DeadlineExceeded): return status.Errorf(codes.DeadlineExceeded, err.Error()) - case context.Canceled: + case errors.Is(err, context.Canceled): return status.Errorf(codes.Canceled, err.Error()) default: return status.Errorf(codes.Unknown, err.Error()) } } -var ( - defaultOptions = &options{ - retryPolicy: nonRepeatable, - max: 0, // disable - backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), - retryAuth: true, - } -) +var defaultOptions = &options{ + retryPolicy: nonRepeatable, + max: 0, // disable + backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), + retryAuth: true, +} // backoffFunc denotes a family of functions that control the backoff duration between call retries. // @@ -378,10 +374,10 @@ var ( // with the next iteration. type backoffFunc func(attempt uint) time.Duration -// withRetryPolicy sets the retry policy of this call. -func withRetryPolicy(rp retryPolicy) retryOption { +// withRepeatablePolicy sets the repeatable policy of this call. +func withRepeatablePolicy() retryOption { return retryOption{applyFunc: func(o *options) { - o.retryPolicy = rp + o.retryPolicy = repeatable }} } @@ -392,7 +388,7 @@ func withMax(maxRetries uint) retryOption { }} } -// WithBackoff sets the `BackoffFunc `used to control time between retries. +// WithBackoff sets the `BackoffFunc` used to control time between retries. func withBackoff(bf backoffFunc) retryOption { return retryOption{applyFunc: func(o *options) { o.backoffFunc = bf diff --git a/vendor/go.etcd.io/etcd/client/v3/sort.go b/vendor/go.etcd.io/etcd/client/v3/sort.go index 2bb9d9a13b783..9918ea927fea0 100644 --- a/vendor/go.etcd.io/etcd/client/v3/sort.go +++ b/vendor/go.etcd.io/etcd/client/v3/sort.go @@ -14,8 +14,10 @@ package clientv3 -type SortTarget int -type SortOrder int +type ( + SortTarget int + SortOrder int +) const ( SortNone SortOrder = iota diff --git a/vendor/go.etcd.io/etcd/client/v3/txn.go b/vendor/go.etcd.io/etcd/client/v3/txn.go index e31bfe0b94d2f..0a57332ac7893 100644 --- a/vendor/go.etcd.io/etcd/client/v3/txn.go +++ b/vendor/go.etcd.io/etcd/client/v3/txn.go @@ -18,9 +18,9 @@ import ( "context" "sync" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) // Txn is the interface that wraps mini-transactions. 
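A recurring change across these files is the move from %v to %w in fmt.Errorf and from == comparisons to errors.Is and errors.As. The difference matters as soon as an error is wrapped; a minimal demonstration with a stdlib sentinel:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// %w (unlike %v) records io.EOF as the wrapped cause.
	err := fmt.Errorf("failed to dial endpoint %s with maintenance client: %w",
		"127.0.0.1:2379", io.EOF)

	fmt.Println(err == io.EOF)          // false: direct equality misses wrapped sentinels
	fmt.Println(errors.Is(err, io.EOF)) // true: errors.Is walks the wrap chain
}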
diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go index 725e8a869e399..a46f98b8e287b 100644 --- a/vendor/go.etcd.io/etcd/client/v3/watch.go +++ b/vendor/go.etcd.io/etcd/client/v3/watch.go @@ -21,15 +21,15 @@ import ( "sync" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/mvccpb" + v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) const ( @@ -149,12 +149,12 @@ type watcher struct { mu sync.Mutex // streams holds all the active grpc streams keyed by ctx value. - streams map[string]*watchGrpcStream + streams map[string]*watchGRPCStream lg *zap.Logger } -// watchGrpcStream tracks all watch resources attached to a single grpc stream. -type watchGrpcStream struct { +// watchGRPCStream tracks all watch resources attached to a single grpc stream. +type watchGRPCStream struct { owner *watcher remote pb.WatchClient callOpts []grpc.CallOption @@ -221,8 +221,7 @@ type watchRequest struct { } // progressRequest is issued by the subscriber to request watch progress -type progressRequest struct { -} +type progressRequest struct{} // watcherStream represents a registered watcher type watcherStream struct { @@ -251,7 +250,7 @@ func NewWatcher(c *Client) Watcher { func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { w := &watcher{ remote: wc, - streams: make(map[string]*watchGrpcStream), + streams: make(map[string]*watchGRPCStream), } if c != nil { w.callOpts = c.callOpts @@ -261,8 +260,10 @@ func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { } // never closes -var valCtxCh = make(chan struct{}) -var zeroTime = time.Unix(0, 0) +var ( + valCtxCh = make(chan struct{}) + zeroTime = time.Unix(0, 0) +) // ctx with only the values; never Done type valCtx struct{ context.Context } @@ -271,9 +272,9 @@ func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } func (vc *valCtx) Err() error { return nil } -func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { +func (w *watcher) newWatcherGRPCStream(inctx context.Context) *watchGRPCStream { ctx, cancel := context.WithCancel(&valCtx{inctx}) - wgs := &watchGrpcStream{ + wgs := &watchGRPCStream{ owner: w, remote: w.remote, callOpts: w.callOpts, @@ -334,7 +335,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch } wgs := w.streams[ctxKey] if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) + wgs = w.newWatcherGRPCStream(ctx) w.streams[ctxKey] = wgs } donec := wgs.donec @@ -395,7 +396,7 @@ func (w *watcher) Close() (err error) { } } // Consider context.Canceled as a successful close - if err == context.Canceled { + if errors.Is(err, context.Canceled) { err = nil } return err @@ -408,11 +409,11 @@ func (w *watcher) RequestProgress(ctx context.Context) (err error) { w.mu.Lock() if w.streams == nil { w.mu.Unlock() - return fmt.Errorf("no stream found for context") + return errors.New("no stream found for context") } wgs := w.streams[ctxKey] if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) + wgs = w.newWatcherGRPCStream(ctx) w.streams[ctxKey] = wgs } donec := wgs.donec @@ -435,7 +436,7 @@ func (w *watcher) RequestProgress(ctx context.Context) (err error) { } } -func (w 
*watchGrpcStream) close() (err error) { +func (w *watchGRPCStream) close() (err error) { w.cancel() <-w.donec select { @@ -445,7 +446,7 @@ func (w *watchGrpcStream) close() (err error) { return ContextError(w.ctx, err) } -func (w *watcher) closeStream(wgs *watchGrpcStream) { +func (w *watcher) closeStream(wgs *watchGRPCStream) { w.mu.Lock() close(wgs.donec) wgs.cancel() @@ -455,7 +456,7 @@ func (w *watcher) closeStream(wgs *watchGrpcStream) { w.mu.Unlock() } -func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { +func (w *watchGRPCStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { // check watch ID for backward compatibility (<= v3.3) if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") { w.closeErr = v3rpc.Error(errors.New(resp.CancelReason)) @@ -467,7 +468,7 @@ func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream w.substreams[ws.id] = ws } -func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { +func (w *watchGRPCStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { select { case ws.outc <- *resp: case <-ws.initReq.ctx.Done(): @@ -476,7 +477,7 @@ func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchRespo close(ws.outc) } -func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { +func (w *watchGRPCStream) closeSubstream(ws *watcherStream) { // send channel response in case stream was never established select { case ws.initReq.retc <- ws.outc: @@ -501,7 +502,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { } // run is the root of the goroutines for managing a watcher client -func (w *watchGrpcStream) run() { +func (w *watchGRPCStream) run() { var wc pb.Watch_WatchClient var closeErr error @@ -653,7 +654,7 @@ func (w *watchGrpcStream) run() { // watch client failed on Recv; spawn another if possible case err := <-w.errc: - if isHaltErr(w.ctx, err) || ContextError(w.ctx, err) == v3rpc.ErrNoLeader { + if isHaltErr(w.ctx, err) || errors.Is(ContextError(w.ctx, err), v3rpc.ErrNoLeader) { closeErr = err return } @@ -699,7 +700,7 @@ func (w *watchGrpcStream) run() { // nextResume chooses the next resuming to register with the grpc stream. Abandoned // streams are marked as nil in the queue since the head must wait for its inflight registration. -func (w *watchGrpcStream) nextResume() *watcherStream { +func (w *watchGRPCStream) nextResume() *watcherStream { for len(w.resuming) != 0 { if w.resuming[0] != nil { return w.resuming[0] @@ -710,7 +711,7 @@ func (w *watchGrpcStream) nextResume() *watcherStream { } // dispatchEvent sends a WatchResponse to the appropriate watcher stream -func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { +func (w *watchGRPCStream) dispatchEvent(pbresp *pb.WatchResponse) bool { events := make([]*Event, len(pbresp.Events)) for i, ev := range pbresp.Events { events[i] = (*Event)(ev) @@ -732,11 +733,10 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { } return w.unicastResponse(wr, pbresp.WatchId) - } // broadcastResponse send a watch response to all watch substreams. -func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool { +func (w *watchGRPCStream) broadcastResponse(wr *WatchResponse) bool { for _, ws := range w.substreams { select { case ws.recvc <- wr: @@ -747,8 +747,8 @@ func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool { } // unicastResponse sends a watch response to a specific watch substream. 
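As a rough standalone analogue of the broadcast/unicast pair here (not etcd's actual logic, which also selects on per-stream done channels rather than dropping), the fan-out pattern over substream channels looks like this:

package main

import "fmt"

// broadcast offers msg to every substream channel; the select with default
// keeps one slow receiver from blocking delivery to the others.
func broadcast(subs map[int64]chan string, msg string) int {
	delivered := 0
	for _, ch := range subs {
		select {
		case ch <- msg:
			delivered++
		default: // receiver not ready
		}
	}
	return delivered
}

// unicast offers msg only to the substream registered under id,
// mirroring delivery keyed by watch ID.
func unicast(subs map[int64]chan string, id int64, msg string) bool {
	ch, ok := subs[id]
	if !ok {
		return false
	}
	select {
	case ch <- msg:
		return true
	default:
		return false
	}
}

func main() {
	subs := map[int64]chan string{1: make(chan string, 1), 2: make(chan string, 1)}
	fmt.Println(broadcast(subs, "event")) // 2
	fmt.Println(unicast(subs, 2, "one"))  // false: the buffer for id 2 is already full
}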
-func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool { - ws, ok := w.substreams[watchId] +func (w *watchGRPCStream) unicastResponse(wr *WatchResponse, watchID int64) bool { + ws, ok := w.substreams[watchID] if !ok { return false } @@ -761,7 +761,7 @@ func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool } // serveWatchClient forwards messages from the grpc stream to run() -func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { +func (w *watchGRPCStream) serveWatchClient(wc pb.Watch_WatchClient) { for { resp, err := wc.Recv() if err != nil { @@ -780,7 +780,7 @@ func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { } // serveSubstream forwards watch responses from run() to the subscriber -func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { +func (w *watchGRPCStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { if ws.closing { panic("created substream goroutine but substream is closing") } @@ -854,6 +854,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{ if len(wr.Events) > 0 { nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 } + ws.initReq.rev = nextRev // created event is already sent above, @@ -876,7 +877,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{ // lazily send cancel message if events on missing id } -func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { +func (w *watchGRPCStream) newWatchClient() (pb.Watch_WatchClient, error) { // mark all substreams as resuming close(w.resumec) w.resumec = make(chan struct{}) @@ -922,7 +923,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { return wc, nil } -func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { +func (w *watchGRPCStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { var wg sync.WaitGroup wg.Add(len(w.resuming)) donec := make(chan struct{}) @@ -959,7 +960,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str } // joinSubstreams waits for all substream goroutines to complete. -func (w *watchGrpcStream) joinSubstreams() { +func (w *watchGRPCStream) joinSubstreams() { for _, ws := range w.substreams { <-ws.donec } @@ -972,7 +973,7 @@ func (w *watchGrpcStream) joinSubstreams() { var maxBackoff = 100 * time.Millisecond -func (w *watchGrpcStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration { +func (w *watchGRPCStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration { if isUnavailableErr(w.ctx, err) { // retry, but backoff if backoff < maxBackoff { @@ -990,7 +991,7 @@ func (w *watchGrpcStream) backoffIfUnavailable(backoff time.Duration, err error) // openWatchClient retries opening a watch client until success or halt. 
// manually retry in case "ws==nil && err==nil" // TODO: remove FailFast=false -func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { +func (w *watchGRPCStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { backoff := time.Millisecond for { select { diff --git a/vendor/go.etcd.io/etcd/pkg/v3/adt/interval_tree.go b/vendor/go.etcd.io/etcd/pkg/v3/adt/interval_tree.go index 271e3d3893449..3c1c3ea8322a6 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/adt/interval_tree.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/adt/interval_tree.go @@ -176,7 +176,7 @@ func (x *intervalNode) visit(iv *Interval, sentinel *intervalNode, nv nodeVisito // IntervalValue represents a range tree node that contains a range and a value. type IntervalValue struct { Ivl Interval - Val interface{} + Val any } // IntervalTree represents a (mostly) textbook implementation of the @@ -184,7 +184,7 @@ type IntervalValue struct { // and chapter 14.3 interval tree with search supporting "stabbing queries". type IntervalTree interface { // Insert adds a node with the given interval into the tree. - Insert(ivl Interval, val interface{}) + Insert(ivl Interval, val any) // Delete removes the node with the given interval from the tree, returning // true if a node is in fact removed. Delete(ivl Interval) bool @@ -241,34 +241,34 @@ type intervalTree struct { // // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p324 // -// RB-DELETE(T, z) -// -// y = z -// y-original-color = y.color -// -// if z.left == T.nil -// x = z.right -// RB-TRANSPLANT(T, z, z.right) -// else if z.right == T.nil -// x = z.left -// RB-TRANSPLANT(T, z, z.left) -// else -// y = TREE-MINIMUM(z.right) -// y-original-color = y.color -// x = y.right -// if y.p == z -// x.p = y -// else -// RB-TRANSPLANT(T, y, y.right) -// y.right = z.right -// y.right.p = y -// RB-TRANSPLANT(T, z, y) -// y.left = z.left -// y.left.p = y -// y.color = z.color -// -// if y-original-color == BLACK -// RB-DELETE-FIXUP(T, x) +// RB-DELETE(T, z) +// +// y = z +// y-original-color = y.color +// +// if z.left == T.nil +// x = z.right +// RB-TRANSPLANT(T, z, z.right) +// else if z.right == T.nil +// x = z.left +// RB-TRANSPLANT(T, z, z.left) +// else +// y = TREE-MINIMUM(z.right) +// y-original-color = y.color +// x = y.right +// if y.p == z +// x.p = y +// else +// RB-TRANSPLANT(T, y, y.right) +// y.right = z.right +// y.right.p = y +// RB-TRANSPLANT(T, z, y) +// y.left = z.left +// y.left.p = y +// y.color = z.color +// +// if y-original-color == BLACK +// RB-DELETE-FIXUP(T, x) // Delete removes the node with the given interval from the tree, returning // true if a node is in fact removed. 
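The interface{} to any replacements in this file, and throughout the diff, are purely cosmetic: since Go 1.18, any is a type alias for interface{}, so the two are the same type rather than merely compatible ones. A tiny sketch:

package main

import "fmt"

func describe(v any) string { return fmt.Sprintf("%T", v) } // any parameter

func main() {
	var old interface{} = 42
	var val any = old // assignable both ways: any and interface{} are identical
	fmt.Println(describe(val)) // int
}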
@@ -423,7 +423,7 @@ func (ivt *intervalTree) deleteFixup(x *intervalNode) { } } -func (ivt *intervalTree) createIntervalNode(ivl Interval, val interface{}) *intervalNode { +func (ivt *intervalTree) createIntervalNode(ivl Interval, val any) *intervalNode { return &intervalNode{ iv: IntervalValue{ivl, val}, max: ivl.End, @@ -438,35 +438,35 @@ func (ivt *intervalTree) createIntervalNode(ivl Interval, val interface{}) *inte // // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p315 // -// RB-INSERT(T, z) +// RB-INSERT(T, z) // -// y = T.nil -// x = T.root +// y = T.nil +// x = T.root // -// while x ≠ T.nil -// y = x -// if z.key < x.key -// x = x.left -// else -// x = x.right +// while x ≠ T.nil +// y = x +// if z.key < x.key +// x = x.left +// else +// x = x.right // -// z.p = y +// z.p = y // -// if y == T.nil -// T.root = z -// else if z.key < y.key -// y.left = z -// else -// y.right = z +// if y == T.nil +// T.root = z +// else if z.key < y.key +// y.left = z +// else +// y.right = z // -// z.left = T.nil -// z.right = T.nil -// z.color = RED +// z.left = T.nil +// z.right = T.nil +// z.color = RED // -// RB-INSERT-FIXUP(T, z) +// RB-INSERT-FIXUP(T, z) // Insert adds a node with the given interval into the tree. -func (ivt *intervalTree) Insert(ivl Interval, val interface{}) { +func (ivt *intervalTree) Insert(ivl Interval, val any) { y := ivt.sentinel z := ivt.createIntervalNode(ivl, val) x := ivt.root @@ -532,7 +532,6 @@ func (ivt *intervalTree) Insert(ivl Interval, val interface{}) { func (ivt *intervalTree) insertFixup(z *intervalNode) { for z.parent.color(ivt.sentinel) == red { if z.parent == z.parent.parent.left { // line 3-15 - y := z.parent.parent.right if y.color(ivt.sentinel) == red { y.c = black diff --git a/vendor/go.etcd.io/etcd/pkg/v3/contention/contention.go b/vendor/go.etcd.io/etcd/pkg/v3/contention/contention.go index 26ce9a2f3473c..d883eb3de9a0f 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/contention/contention.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/contention/contention.go @@ -27,8 +27,7 @@ import ( type TimeoutDetector struct { mu sync.Mutex // protects all maxDuration time.Duration - // map from event to time - // time is the last seen time of the event. + // map from event to last seen time of event. records map[uint64]time.Time } @@ -40,7 +39,7 @@ func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector { } } -// Reset resets the NewTimeoutDetector. +// Reset resets the TimeoutDetector. func (td *TimeoutDetector) Reset() { td.mu.Lock() defer td.mu.Unlock() @@ -48,9 +47,11 @@ func (td *TimeoutDetector) Reset() { td.records = make(map[uint64]time.Time) } -// Observe observes an event for given id. It returns false and exceeded duration -// if the interval is longer than the expectation. -func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) { +// Observe observes an event of given id. It computes +// the time elapsed between successive events of given id. +// It returns whether this time elapsed exceeds the expectation, +// and the amount by which it exceeds the expectation. 
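A usage sketch against the detector API shown in this file (import path per this vendor directory); the id and durations are arbitrary:

package main

import (
	"fmt"
	"time"

	"go.etcd.io/etcd/pkg/v3/contention"
)

func main() {
	// Expect successive events of the same id at most 100ms apart.
	td := contention.NewTimeoutDetector(100 * time.Millisecond)

	ok, _ := td.Observe(1) // first event of id 1: nothing to compare against yet
	fmt.Println(ok)        // true

	time.Sleep(150 * time.Millisecond)
	ok, exceeded := td.Observe(1) // ~150ms elapsed against a 100ms budget
	fmt.Println(ok, exceeded)     // false, roughly 50ms
}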
+func (td *TimeoutDetector) Observe(id uint64) (bool, time.Duration) { td.mu.Lock() defer td.mu.Unlock() @@ -58,12 +59,12 @@ func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) { now := time.Now() exceed := time.Duration(0) - if pt, found := td.records[which]; found { + if pt, found := td.records[id]; found { exceed = now.Sub(pt) - td.maxDuration if exceed > 0 { ok = false } } - td.records[which] = now + td.records[id] = now return ok, exceed } diff --git a/vendor/go.etcd.io/etcd/pkg/v3/cpuutil/endian.go b/vendor/go.etcd.io/etcd/pkg/v3/cpuutil/endian.go index 06c06cd4a5fa5..d654b747664dc 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/cpuutil/endian.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/cpuutil/endian.go @@ -19,7 +19,7 @@ import ( "unsafe" ) -const intWidth int = int(unsafe.Sizeof(0)) +const intWidth = int(unsafe.Sizeof(0)) var byteOrder binary.ByteOrder @@ -27,7 +27,7 @@ var byteOrder binary.ByteOrder func ByteOrder() binary.ByteOrder { return byteOrder } func init() { - i := int(0x1) + i := 0x1 if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 { byteOrder = binary.BigEndian } else { diff --git a/vendor/go.etcd.io/etcd/pkg/v3/featuregate/feature_gate.go b/vendor/go.etcd.io/etcd/pkg/v3/featuregate/feature_gate.go new file mode 100644 index 0000000000000..b9e0175bef296 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/v3/featuregate/feature_gate.go @@ -0,0 +1,418 @@ +// Copyright 2024 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package featuregate is copied from k8s.io/component-base@v0.30.1 to avoid any potential circular dependency between k8s and etcd. +package featuregate + +import ( + "flag" + "fmt" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/spf13/pflag" + "go.uber.org/zap" +) + +type Feature string + +const ( + defaultFlagName = "feature-gates" + + // allAlphaGate is a global toggle for alpha features. Per-feature key + // values override the default set by allAlphaGate. Examples: + // AllAlpha=false,NewFeature=true will result in newFeature=true + // AllAlpha=true,NewFeature=false will result in newFeature=false + allAlphaGate Feature = "AllAlpha" + + // allBetaGate is a global toggle for beta features. Per-feature key + // values override the default set by allBetaGate. Examples: + // AllBeta=false,NewFeature=true will result in NewFeature=true + // AllBeta=true,NewFeature=false will result in NewFeature=false + allBetaGate Feature = "AllBeta" +) + +var ( + // The generic features. + defaultFeatures = map[Feature]FeatureSpec{ + allAlphaGate: {Default: false, PreRelease: Alpha}, + allBetaGate: {Default: false, PreRelease: Beta}, + } + + // Special handling for a few gates. 
+ specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){ + allAlphaGate: setUnsetAlphaGates, + allBetaGate: setUnsetBetaGates, + } +) + +type FeatureSpec struct { + // Default is the default enablement state for the feature + Default bool + // LockToDefault indicates that the feature is locked to its default and cannot be changed + LockToDefault bool + // PreRelease indicates the maturity level of the feature + PreRelease prerelease +} + +type prerelease string + +const ( + // Values for PreRelease. + Alpha = prerelease("ALPHA") + Beta = prerelease("BETA") + GA = prerelease("") + + // Deprecated + Deprecated = prerelease("DEPRECATED") +) + +// FeatureGate indicates whether a given feature is enabled or not +type FeatureGate interface { + // Enabled returns true if the key is enabled. + Enabled(key Feature) bool + // KnownFeatures returns a slice of strings describing the FeatureGate's known features. + KnownFeatures() []string + // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be + // set on the copy without mutating the original. This is useful for validating + // config against potential feature gate changes before committing those changes. + DeepCopy() MutableFeatureGate +} + +// MutableFeatureGate parses and stores flag gates for known features from +// a string like feature1=true,feature2=false,... +type MutableFeatureGate interface { + FeatureGate + + // AddFlag adds a flag for setting global feature gates to the specified FlagSet. + AddFlag(fs *flag.FlagSet, flagName string) + // Set parses and stores flag gates for known features + // from a string like feature1=true,feature2=false,... + Set(value string) error + // SetFromMap stores flag gates for known features from a map[string]bool or returns an error + SetFromMap(m map[string]bool) error + // Add adds features to the featureGate. + Add(features map[Feature]FeatureSpec) error + // GetAll returns a copy of the map of known feature names to feature specs. + GetAll() map[Feature]FeatureSpec + // AddMetrics adds feature enablement metrics + AddMetrics() + // OverrideDefault sets a local override for the registered default value of a named + // feature. If the feature has not been previously registered (e.g. by a call to Add), has a + // locked default, or if the gate has already registered itself with a FlagSet, a non-nil + // error is returned. + // + // When two or more components consume a common feature, one component can override its + // default at runtime in order to adopt new defaults before or after the other + // components. For example, a new feature can be evaluated with a limited blast radius by + // overriding its default to true for a limited number of components without simultaneously + // changing its default for all consuming components. + OverrideDefault(name Feature, override bool) error +} + +// featureGate implements FeatureGate as well as pflag.Value for flag parsing. 
+type featureGate struct { + lg *zap.Logger + + featureGateName string + + special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool) + + // lock guards writes to known, enabled, and reads/writes of closed + lock sync.Mutex + // known holds a map[Feature]FeatureSpec + known atomic.Value + // enabled holds a map[Feature]bool + enabled atomic.Value + // closed is set to true when AddFlag is called, and prevents subsequent calls to Add + closed bool +} + +func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { + for k, v := range known { + if v.PreRelease == Alpha { + if _, found := enabled[k]; !found { + enabled[k] = val + } + } + } +} + +func setUnsetBetaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { + for k, v := range known { + if v.PreRelease == Beta { + if _, found := enabled[k]; !found { + enabled[k] = val + } + } + } +} + +// Set, String, and Type implement pflag.Value +var _ pflag.Value = &featureGate{} + +func New(name string, lg *zap.Logger) *featureGate { + if lg == nil { + lg = zap.NewNop() + } + known := map[Feature]FeatureSpec{} + for k, v := range defaultFeatures { + known[k] = v + } + + f := &featureGate{ + lg: lg, + featureGateName: name, + special: specialFeatures, + } + f.known.Store(known) + f.enabled.Store(map[Feature]bool{}) + + return f +} + +// Set parses a string of the form "key1=value1,key2=value2,..." into a +// map[string]bool of known keys or returns an error. +func (f *featureGate) Set(value string) error { + m := make(map[string]bool) + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + k := strings.TrimSpace(arr[0]) + if len(arr) != 2 { + return fmt.Errorf("missing bool value for %s", k) + } + v := strings.TrimSpace(arr[1]) + boolValue, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("invalid value of %s=%s, err: %w", k, v, err) + } + m[k] = boolValue + } + return f.SetFromMap(m) +} + +// SetFromMap stores flag gates for known features from a map[string]bool or returns an error +func (f *featureGate) SetFromMap(m map[string]bool) error { + f.lock.Lock() + defer f.lock.Unlock() + + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + for k, v := range m { + k := Feature(k) + featureSpec, ok := known[k] + if !ok { + return fmt.Errorf("unrecognized feature gate: %s", k) + } + if featureSpec.LockToDefault && featureSpec.Default != v { + return fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default) + } + enabled[k] = v + // Handle "special" features like "all alpha gates" + if fn, found := f.special[k]; found { + fn(known, enabled, v) + } + + if featureSpec.PreRelease == Deprecated { + f.lg.Warn(fmt.Sprintf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v)) + } else if featureSpec.PreRelease == GA { + f.lg.Warn(fmt.Sprintf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v)) + } + } + + // Persist changes + f.known.Store(known) + f.enabled.Store(enabled) + + f.lg.Info(fmt.Sprintf("feature gates: %v", f.enabled)) + return nil +} + +// String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...". 
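Before the implementation continues below, a usage sketch built only from the API added in this file; "ExampleFeature" is a made-up gate name for the demo:

package main

import (
	"fmt"

	"go.uber.org/zap"

	"go.etcd.io/etcd/pkg/v3/featuregate"
)

func main() {
	fg := featuregate.New("demo", zap.NewExample())

	// Register a gate, then flip it with the same key=bool syntax the
	// feature-gates flag accepts.
	if err := fg.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"ExampleFeature": {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
	if err := fg.Set("ExampleFeature=true"); err != nil {
		panic(err)
	}

	fmt.Println(fg.Enabled("ExampleFeature")) // true
	fmt.Println(fg.String())                  // ExampleFeature=true
}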
+func (f *featureGate) String() string { + pairs := []string{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + pairs = append(pairs, fmt.Sprintf("%s=%t", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (f *featureGate) Type() string { + return "mapStringBool" +} + +// Add adds features to the featureGate. +func (f *featureGate) Add(features map[Feature]FeatureSpec) error { + f.lock.Lock() + defer f.lock.Unlock() + + if f.closed { + return fmt.Errorf("cannot add a feature gate after adding it to the flag set") + } + + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + + for name, spec := range features { + if existingSpec, found := known[name]; found { + if existingSpec == spec { + continue + } + return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec) + } + + known[name] = spec + } + + // Persist updated state + f.known.Store(known) + + return nil +} + +func (f *featureGate) OverrideDefault(name Feature, override bool) error { + f.lock.Lock() + defer f.lock.Unlock() + + if f.closed { + return fmt.Errorf("cannot override default for feature %q: gates already added to a flag set", name) + } + + known := map[Feature]FeatureSpec{} + for name, spec := range f.known.Load().(map[Feature]FeatureSpec) { + known[name] = spec + } + + spec, ok := known[name] + switch { + case !ok: + return fmt.Errorf("cannot override default: feature %q is not registered", name) + case spec.LockToDefault: + return fmt.Errorf("cannot override default: feature %q default is locked to %t", name, spec.Default) + case spec.PreRelease == Deprecated: + f.lg.Warn(fmt.Sprintf("Overriding default of deprecated feature gate %s=%t. It will be removed in a future release.", name, override)) + case spec.PreRelease == GA: + f.lg.Warn(fmt.Sprintf("Overriding default of GA feature gate %s=%t. It will be removed in a future release.", name, override)) + } + + spec.Default = override + known[name] = spec + f.known.Store(known) + + return nil +} + +// GetAll returns a copy of the map of known feature names to feature specs. +func (f *featureGate) GetAll() map[Feature]FeatureSpec { + retval := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + retval[k] = v + } + return retval +} + +// Enabled returns true if the key is enabled. If the key is not known, this call will panic. +func (f *featureGate) Enabled(key Feature) bool { + if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok { + return v + } + if v, ok := f.known.Load().(map[Feature]FeatureSpec)[key]; ok { + return v.Default + } + + panic(fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName)) +} + +// AddFlag adds a flag for setting global feature gates to the specified FlagSet. +func (f *featureGate) AddFlag(fs *flag.FlagSet, flagName string) { + if flagName == "" { + flagName = defaultFlagName + } + f.lock.Lock() + // TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead? + // Not all components expose a feature gates flag using this AddFlag method, and + // in the future, all components will completely stop exposing a feature gates flag, + // in favor of componentconfig. + f.closed = true + f.lock.Unlock() + + known := f.KnownFeatures() + fs.Var(f, flagName, ""+ + "A set of key=value pairs that describe feature gates for alpha/experimental features. 
"+ + "Options are:\n"+strings.Join(known, "\n")) +} + +func (f *featureGate) AddMetrics() { + // TODO(henrybear327): implement this. +} + +// KnownFeatures returns a slice of strings describing the FeatureGate's known features. +// Deprecated and GA features are hidden from the list. +func (f *featureGate) KnownFeatures() []string { + var known []string + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + if v.PreRelease == GA || v.PreRelease == Deprecated { + continue + } + known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v.PreRelease, v.Default)) + } + sort.Strings(known) + return known +} + +// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be +// set on the copy without mutating the original. This is useful for validating +// config against potential feature gate changes before committing those changes. +func (f *featureGate) DeepCopy() MutableFeatureGate { + // Copy existing state. + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + // Construct a new featureGate around the copied state. + // Note that specialFeatures is treated as immutable by convention, + // and we maintain the value of f.closed across the copy. + fg := &featureGate{ + special: specialFeatures, + closed: f.closed, + } + + fg.known.Store(known) + fg.enabled.Store(enabled) + + return fg +} diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/flag.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/flag.go index 76a51a8901941..b48921c6dcdee 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/flags/flag.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/flag.go @@ -19,6 +19,7 @@ import ( "flag" "fmt" "os" + "strconv" "strings" "github.com/spf13/pflag" @@ -66,7 +67,7 @@ func SetPflagsFromEnv(lg *zap.Logger, prefix string, fs *pflag.FlagSet) error { // FlagToEnv converts flag string to upper-case environment variable key string. func FlagToEnv(prefix, name string) string { - return prefix + "_" + strings.ToUpper(strings.Replace(name, "-", "_", -1)) + return prefix + "_" + strings.ToUpper(strings.ReplaceAll(name, "-", "_")) } func verifyEnv(lg *zap.Logger, prefix string, usedEnvKey, alreadySet map[string]bool) { @@ -107,7 +108,7 @@ func setFlagFromEnv(lg *zap.Logger, fs flagSetter, prefix, fname string, usedEnv if val != "" { usedEnvKey[key] = true if serr := fs.Set(fname, val); serr != nil { - return fmt.Errorf("invalid value %q for %s: %v", val, key, serr) + return fmt.Errorf("invalid value %q for %s: %w", val, key, serr) } if log && lg != nil { lg.Info( @@ -130,3 +131,16 @@ func IsSet(fs *flag.FlagSet, name string) bool { }) return set } + +// GetBoolFlagVal returns the value of the a given bool flag if it is explicitly set +// in the cmd line arguments, otherwise returns nil. 
+func GetBoolFlagVal(fs *flag.FlagSet, flagName string) (*bool, error) { + if !IsSet(fs, flagName) { + return nil, nil + } + flagVal, parseErr := strconv.ParseBool(fs.Lookup(flagName).Value.String()) + if parseErr != nil { + return nil, parseErr + } + return &flagVal, nil +} diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/strings.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/strings.go index a80190658e479..e3d131f790251 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/flags/strings.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/strings.go @@ -49,5 +49,5 @@ func NewStringsValue(s string) (ss *StringsValue) { // StringsFromFlag returns a string slice from the flag. func StringsFromFlag(fs *flag.FlagSet, flagName string) []string { - return []string(*fs.Lookup(flagName).Value.(*StringsValue)) + return *fs.Lookup(flagName).Value.(*StringsValue) } diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/urls.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/urls.go index 885d32f457af4..27db58743be32 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/flags/urls.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/urls.go @@ -62,5 +62,5 @@ func NewURLsValue(s string) *URLsValue { // URLsFromFlag returns a slices from url got from the flag. func URLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL { - return []url.URL(*fs.Lookup(urlsFlagName).Value.(*URLsValue)) + return *fs.Lookup(urlsFlagName).Value.(*URLsValue) } diff --git a/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go b/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go index 3bf58a3a1dc12..41758138a47d9 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go @@ -21,7 +21,6 @@ package httputil import ( "io" - "io/ioutil" "net" "net/http" ) @@ -31,7 +30,7 @@ import ( // therefore available for reuse. // Borrowed from golang/net/context/ctxhttp/cancelreq.go. func GracefulClose(resp *http.Response) { - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } diff --git a/vendor/go.etcd.io/etcd/pkg/v3/ioutil/pagewriter.go b/vendor/go.etcd.io/etcd/pkg/v3/ioutil/pagewriter.go index 62eb5cd43e50f..ebab6487e08f0 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/ioutil/pagewriter.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/ioutil/pagewriter.go @@ -15,8 +15,9 @@ package ioutil import ( - "fmt" "io" + + "go.etcd.io/etcd/client/pkg/v3/verify" ) var defaultBufferBytes = 128 * 1024 @@ -42,9 +43,7 @@ type PageWriter struct { // NewPageWriter creates a new PageWriter. pageBytes is the number of bytes // to write per page. pageOffset is the starting offset of io.Writer. func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter { - if pageBytes <= 0 { - panic(fmt.Sprintf("assertion failed: invalid pageBytes (%d) value, it must be greater than 0", pageBytes)) - } + verify.Assert(pageBytes > 0, "invalid pageBytes (%d) value, it must be greater than 0", pageBytes) return &PageWriter{ w: w, pageOffset: pageOffset, @@ -105,11 +104,6 @@ func (pw *PageWriter) Flush() error { return err } -// FlushN flushes buffered data and returns the number of written bytes. 
-func (pw *PageWriter) FlushN() (int, error) { - return pw.flush() -} - func (pw *PageWriter) flush() (int, error) { if pw.bufferedBytes == 0 { return 0, nil diff --git a/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go b/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go index 689927b4c6763..0f1a685855c8c 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go @@ -16,6 +16,7 @@ package netutil import ( "context" + "errors" "fmt" "net" "net/url" @@ -23,9 +24,9 @@ import ( "sort" "time" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/types" ) // indirection for testing @@ -70,14 +71,14 @@ func resolveTCPAddrs(ctx context.Context, lg *zap.Logger, urls [][]url.URL) ([][ for i, u := range us { nu, err := url.Parse(u.String()) if err != nil { - return nil, fmt.Errorf("failed to parse %q (%v)", u.String(), err) + return nil, fmt.Errorf("failed to parse %q (%w)", u.String(), err) } nus[i] = *nu } for i, u := range nus { h, err := resolveURL(ctx, lg, u) if err != nil { - return nil, fmt.Errorf("failed to resolve %q (%v)", u.String(), err) + return nil, fmt.Errorf("failed to resolve %q (%w)", u.String(), err) } if h != "" { nus[i].Host = h @@ -217,6 +218,6 @@ func stringsToURLs(us []string) ([]url.URL, error) { } func IsNetworkTimeoutError(err error) bool { - nerr, ok := err.(net.Error) - return ok && nerr.Timeout() + var nerr net.Error + return errors.As(err, &nerr) && nerr.Timeout() } diff --git a/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes.go b/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes.go index f66719ea16321..a7d67df3d4161 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !linux -// +build !linux package netutil diff --git a/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes_linux.go b/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes_linux.go index 5f8bd0c4889a5..f55899b0bdafd 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes_linux.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes_linux.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build linux -// +build linux package netutil @@ -22,7 +21,7 @@ import ( "encoding/binary" "fmt" "net" - "sort" + "slices" "syscall" "go.etcd.io/etcd/pkg/v3/cpuutil" @@ -49,14 +48,13 @@ func GetDefaultHost() (string, error) { } // sort so choice is deterministic - var families []int + var families []uint8 for family := range rmsgs { - families = append(families, int(family)) + families = append(families, family) } - sort.Ints(families) + slices.Sort(families) - for _, f := range families { - family := uint8(f) + for _, family := range families { if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil { return host, err } @@ -154,7 +152,6 @@ func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) { } return nil, fmt.Errorf("could not find address for interface index %v", idx) - } // Used to get a name of interface. diff --git a/vendor/go.etcd.io/etcd/pkg/v3/notify/notify.go b/vendor/go.etcd.io/etcd/pkg/v3/notify/notify.go new file mode 100644 index 0000000000000..8925a1ea21817 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/v3/notify/notify.go @@ -0,0 +1,52 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package notify
+
+import (
+	"sync"
+)
+
+// Notifier is a thread-safe struct that can be used to send notification about
+// some event to multiple consumers.
+type Notifier struct {
+	mu      sync.RWMutex
+	channel chan struct{}
+}
+
+// NewNotifier returns a new Notifier.
+func NewNotifier() *Notifier {
+	return &Notifier{
+		channel: make(chan struct{}),
+	}
+}
+
+// Receive returns a channel that can be used to wait for notification.
+// Consumers will be informed by closing the channel.
+func (n *Notifier) Receive() <-chan struct{} {
+	n.mu.RLock()
+	defer n.mu.RUnlock()
+	return n.channel
+}
+
+// Notify closes the channel passed to consumers and creates a new channel to
+// be used for the next notification.
+func (n *Notifier) Notify() {
+	newChannel := make(chan struct{})
+	n.mu.Lock()
+	channelToClose := n.channel
+	n.channel = newChannel
+	n.mu.Unlock()
+	close(channelToClose)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/runtime/fds_other.go b/vendor/go.etcd.io/etcd/pkg/v3/runtime/fds_other.go
index 034f3d42646bf..2311bb1972597 100644
--- a/vendor/go.etcd.io/etcd/pkg/v3/runtime/fds_other.go
+++ b/vendor/go.etcd.io/etcd/pkg/v3/runtime/fds_other.go
@@ -13,7 +13,6 @@
 // limitations under the License.
 
 //go:build !linux
-// +build !linux
 
 package runtime
 
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/schedule/schedule.go b/vendor/go.etcd.io/etcd/pkg/v3/schedule/schedule.go
index 234d01989dfb2..06a243df91407 100644
--- a/vendor/go.etcd.io/etcd/pkg/v3/schedule/schedule.go
+++ b/vendor/go.etcd.io/etcd/pkg/v3/schedule/schedule.go
@@ -17,9 +17,36 @@ package schedule
 import (
 	"context"
 	"sync"
+
+	"go.uber.org/zap"
+
+	"go.etcd.io/etcd/client/pkg/v3/verify"
 )
 
-type Job func(context.Context)
+type Job interface {
+	Name() string
+	Do(context.Context)
+}
+
+type job struct {
+	name string
+	do   func(context.Context)
+}
+
+func (j job) Name() string {
+	return j.name
+}
+
+func (j job) Do(ctx context.Context) {
+	j.do(ctx)
+}
+
+func NewJob(name string, do func(ctx context.Context)) Job {
+	return job{
+		name: name,
+		do:   do,
+	}
+}
 
 // Scheduler can schedule jobs.
 type Scheduler interface {
@@ -56,14 +83,18 @@ type fifo struct {
 	finishCond *sync.Cond
 	donec      chan struct{}
+	lg         *zap.Logger
 }
 
 // NewFIFOScheduler returns a Scheduler that schedules jobs in FIFO
 // order sequentially
-func NewFIFOScheduler() Scheduler {
+func NewFIFOScheduler(lg *zap.Logger) Scheduler {
+	verify.Assert(lg != nil, "the logger should not be nil")
+
 	f := &fifo{
 		resume: make(chan struct{}, 1),
 		donec:  make(chan struct{}, 1),
+		lg:     lg,
 	}
 	f.finishCond = sync.NewCond(&f.mu)
 	f.ctx, f.cancel = context.WithCancel(context.Background())
@@ -125,7 +156,6 @@ func (f *fifo) Stop() {
 }
 
 func (f *fifo) run() {
-	// TODO: recover from job panic?
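The new notify.Notifier above broadcasts by closing a channel: every goroutine blocked on Receive wakes at once, and Notify swaps in a fresh channel for the next round. A small usage sketch:

```go
package main

import (
	"fmt"
	"sync"

	"go.etcd.io/etcd/pkg/v3/notify"
)

func main() {
	n := notify.NewNotifier()

	// Snapshot the current notification channel before starting waiters,
	// so every one of them observes the same Notify.
	ch := n.Receive()

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-ch // unblocks when Notify closes this channel
			fmt.Printf("consumer %d woke up\n", id)
		}(i)
	}

	n.Notify() // closes the old channel and installs a fresh one
	wg.Wait()

	// Receive now returns the new, still-open channel.
	select {
	case <-n.Receive():
		fmt.Println("unexpected: new channel already closed")
	default:
		fmt.Println("new channel is open, as expected")
	}
}
```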
defer func() { close(f.donec) close(f.resume) @@ -149,17 +179,29 @@ func (f *fifo) run() { f.mu.Unlock() // clean up pending jobs for _, todo := range pendings { - todo(f.ctx) + f.executeJob(todo, true) } return } } else { - todo(f.ctx) + f.executeJob(todo, false) + } + } +} + +func (f *fifo) executeJob(todo Job, updatedFinishedStats bool) { + defer func() { + if !updatedFinishedStats { f.finishCond.L.Lock() f.finished++ f.pendings = f.pendings[1:] f.finishCond.Broadcast() f.finishCond.L.Unlock() } - } + if err := recover(); err != nil { + f.lg.Panic("execute job failed", zap.String("job", todo.Name()), zap.Any("panic", err)) + } + }() + + todo.Do(f.ctx) } diff --git a/vendor/go.etcd.io/etcd/pkg/v3/traceutil/trace.go b/vendor/go.etcd.io/etcd/pkg/v3/traceutil/trace.go index bdd8e9b66a20a..0f36323bd19e1 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/traceutil/trace.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/traceutil/trace.go @@ -16,24 +16,25 @@ package traceutil import ( - "bytes" "context" "fmt" "math/rand" + "strings" "time" "go.uber.org/zap" ) -const ( - TraceKey = "trace" - StartTimeKey = "startTime" -) +// TraceKey is used as a key of context for Trace. +type TraceKey struct{} + +// StartTimeKey is used as a key of context for start time of operation. +type StartTimeKey struct{} // Field is a kv pair to record additional details of the trace. type Field struct { Key string - Value interface{} + Value any } func (f *Field) format() string { @@ -44,7 +45,7 @@ func writeFields(fields []Field) string { if len(fields) == 0 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteString("{") for _, f := range fields { buf.WriteString(f.format()) @@ -81,7 +82,7 @@ func TODO() *Trace { } func Get(ctx context.Context) *Trace { - if trace, ok := ctx.Value(TraceKey).(*Trace); ok && trace != nil { + if trace, ok := ctx.Value(TraceKey{}).(*Trace); ok && trace != nil { return trace } return TODO() @@ -181,33 +182,33 @@ func (t *Trace) logInfo(threshold time.Duration) (string, []zap.Field) { var steps []string lastStepTime := t.startTime for i := 0; i < len(t.steps); i++ { - step := t.steps[i] + tstep := t.steps[i] // add subtrace common fields which defined at the beginning to each sub-steps - if step.isSubTraceStart { + if tstep.isSubTraceStart { for j := i + 1; j < len(t.steps) && !t.steps[j].isSubTraceEnd; j++ { - t.steps[j].fields = append(step.fields, t.steps[j].fields...) + t.steps[j].fields = append(tstep.fields, t.steps[j].fields...) } continue } // add subtrace common fields which defined at the end to each sub-steps - if step.isSubTraceEnd { + if tstep.isSubTraceEnd { for j := i - 1; j >= 0 && !t.steps[j].isSubTraceStart; j-- { - t.steps[j].fields = append(step.fields, t.steps[j].fields...) + t.steps[j].fields = append(tstep.fields, t.steps[j].fields...) 
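With the schedule.go rework above, callers construct named jobs and must pass a logger; a job that panics now terminates the process through lg.Panic rather than going unnoticed. A usage sketch, assuming Schedule and Stop are the Scheduler interface methods elided in the hunk above:

```go
package main

import (
	"context"
	"fmt"

	"go.uber.org/zap"

	"go.etcd.io/etcd/pkg/v3/schedule"
)

func main() {
	// The logger is mandatory now; verify.Assert rejects nil.
	s := schedule.NewFIFOScheduler(zap.NewExample())
	defer s.Stop()

	done := make(chan struct{})
	// Jobs carry a name so a panic can be attributed in the log.
	s.Schedule(schedule.NewJob("example/print", func(ctx context.Context) {
		fmt.Println("running job")
		close(done)
	}))
	<-done
}
```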
} continue } } for i := 0; i < len(t.steps); i++ { - step := t.steps[i] - if step.isSubTraceStart || step.isSubTraceEnd { + tstep := t.steps[i] + if tstep.isSubTraceStart || tstep.isSubTraceEnd { continue } - stepDuration := step.time.Sub(lastStepTime) + stepDuration := tstep.time.Sub(lastStepTime) if stepDuration > threshold { steps = append(steps, fmt.Sprintf("trace[%d] '%v' %s (duration: %v)", - traceNum, step.msg, writeFields(step.fields), stepDuration)) + traceNum, tstep.msg, writeFields(tstep.fields), stepDuration)) } - lastStepTime = step.time + lastStepTime = tstep.time } fs := []zap.Field{zap.String("detail", writeFields(t.fields)), diff --git a/vendor/go.etcd.io/etcd/pkg/v3/wait/wait.go b/vendor/go.etcd.io/etcd/pkg/v3/wait/wait.go index a3e2aec7cb5ba..8989f32d57938 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/wait/wait.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/wait/wait.go @@ -34,9 +34,9 @@ type Wait interface { // Register waits returns a chan that waits on the given ID. // The chan will be triggered when Trigger is called with // the same ID. - Register(id uint64) <-chan interface{} + Register(id uint64) <-chan any // Trigger triggers the waiting chans with the given ID. - Trigger(id uint64, x interface{}) + Trigger(id uint64, x any) IsRegistered(id uint64) bool } @@ -46,7 +46,7 @@ type list struct { type listElement struct { l sync.RWMutex - m map[uint64]chan interface{} + m map[uint64]chan any } // New creates a Wait. @@ -55,14 +55,14 @@ func New() Wait { e: make([]listElement, defaultListElementLength), } for i := 0; i < len(res.e); i++ { - res.e[i].m = make(map[uint64]chan interface{}) + res.e[i].m = make(map[uint64]chan any) } return &res } -func (w *list) Register(id uint64) <-chan interface{} { +func (w *list) Register(id uint64) <-chan any { idx := id % defaultListElementLength - newCh := make(chan interface{}, 1) + newCh := make(chan any, 1) w.e[idx].l.Lock() defer w.e[idx].l.Unlock() if _, ok := w.e[idx].m[id]; !ok { @@ -73,7 +73,7 @@ func (w *list) Register(id uint64) <-chan interface{} { return newCh } -func (w *list) Trigger(id uint64, x interface{}) { +func (w *list) Trigger(id uint64, x any) { idx := id % defaultListElementLength w.e[idx].l.Lock() ch := w.e[idx].m[id] @@ -94,17 +94,17 @@ func (w *list) IsRegistered(id uint64) bool { } type waitWithResponse struct { - ch <-chan interface{} + ch <-chan any } -func NewWithResponse(ch <-chan interface{}) Wait { +func NewWithResponse(ch <-chan any) Wait { return &waitWithResponse{ch: ch} } -func (w *waitWithResponse) Register(id uint64) <-chan interface{} { +func (w *waitWithResponse) Register(id uint64) <-chan any { return w.ch } -func (w *waitWithResponse) Trigger(id uint64, x interface{}) {} +func (w *waitWithResponse) Trigger(id uint64, x any) {} func (w *waitWithResponse) IsRegistered(id uint64) bool { panic("waitWithResponse.IsRegistered() shouldn't be called") } diff --git a/vendor/go.etcd.io/etcd/pkg/v3/wait/wait_time.go b/vendor/go.etcd.io/etcd/pkg/v3/wait/wait_time.go index 297e48a47d76f..131788959e6e6 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/wait/wait_time.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/wait/wait_time.go @@ -19,9 +19,9 @@ import "sync" type WaitTime interface { // Wait returns a chan that waits on the given logical deadline. // The chan will be triggered when Trigger is called with a - // deadline that is later than the one it is waiting for. + // deadline that is later than or equal to the one it is waiting for. 
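The wait.Wait changes above are mechanical (interface{} to any); the Register/Trigger contract itself is unchanged. A small sketch of that contract:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/wait"
)

func main() {
	w := wait.New()

	// Register interest in request ID 42 before the result can arrive.
	ch := w.Register(42)

	// Elsewhere (for example, an apply loop) delivers the result by ID.
	go w.Trigger(42, "applied at revision 7")

	fmt.Println(<-ch) // prints: applied at revision 7
}
```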
Wait(deadline uint64) <-chan struct{} - // Trigger triggers all the waiting chans with an earlier logical deadline. + // Trigger triggers all the waiting chans with an equal or earlier logical deadline. Trigger(deadline uint64) } diff --git a/vendor/go.etcd.io/etcd/raft/v3/OWNERS b/vendor/go.etcd.io/etcd/raft/v3/OWNERS deleted file mode 100644 index ab781066e2374..0000000000000 --- a/vendor/go.etcd.io/etcd/raft/v3/OWNERS +++ /dev/null @@ -1,19 +0,0 @@ -approvers: -- heyitsanthony -- philips -- fanminshi -- gyuho -- mitake -- jpbetz -- xiang90 -- bdarnell -reviewers: -- heyitsanthony -- philips -- fanminshi -- gyuho -- mitake -- jpbetz -- xiang90 -- bdarnell -- tschottdorf diff --git a/vendor/go.etcd.io/etcd/raft/v3/log.go b/vendor/go.etcd.io/etcd/raft/v3/log.go deleted file mode 100644 index c94c41f7783e3..0000000000000 --- a/vendor/go.etcd.io/etcd/raft/v3/log.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "fmt" - "log" - - pb "go.etcd.io/etcd/raft/v3/raftpb" -) - -type raftLog struct { - // storage contains all stable entries since the last snapshot. - storage Storage - - // unstable contains all unstable entries and snapshot. - // they will be saved into storage. - unstable unstable - - // committed is the highest log position that is known to be in - // stable storage on a quorum of nodes. - committed uint64 - // applied is the highest log position that the application has - // been instructed to apply to its state machine. - // Invariant: applied <= committed - applied uint64 - - logger Logger - - // maxNextEntsSize is the maximum number aggregate byte size of the messages - // returned from calls to nextEnts. - maxNextEntsSize uint64 -} - -// newLog returns log using the given storage and default options. It -// recovers the log to the state that it just commits and applies the -// latest snapshot. -func newLog(storage Storage, logger Logger) *raftLog { - return newLogWithSize(storage, logger, noLimit) -} - -// newLogWithSize returns a log using the given storage and max -// message size. -func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog { - if storage == nil { - log.Panic("storage must not be nil") - } - log := &raftLog{ - storage: storage, - logger: logger, - maxNextEntsSize: maxNextEntsSize, - } - firstIndex, err := storage.FirstIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - lastIndex, err := storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - log.unstable.offset = lastIndex + 1 - log.unstable.logger = logger - // Initialize our committed and applied pointers to the time of the last compaction. 
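The WaitTime comment fixes above align the docs with actual behavior: triggering deadline d also releases waiters registered at exactly d, not only those strictly earlier. A sketch, assuming NewTimeList is the package's WaitTime constructor:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/wait"
)

func main() {
	wt := wait.NewTimeList()

	ch := wt.Wait(5) // wait on logical deadline 5
	wt.Trigger(5)    // an equal deadline releases the waiter too

	<-ch
	fmt.Println("released at deadline 5")
}
```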
- log.committed = firstIndex - 1 - log.applied = firstIndex - 1 - - return log -} - -func (l *raftLog) String() string { - return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries)) -} - -// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise, -// it returns (last index of new entries, true). -func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) { - if l.matchTerm(index, logTerm) { - lastnewi = index + uint64(len(ents)) - ci := l.findConflict(ents) - switch { - case ci == 0: - case ci <= l.committed: - l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed) - default: - offset := index + 1 - l.append(ents[ci-offset:]...) - } - l.commitTo(min(committed, lastnewi)) - return lastnewi, true - } - return 0, false -} - -func (l *raftLog) append(ents ...pb.Entry) uint64 { - if len(ents) == 0 { - return l.lastIndex() - } - if after := ents[0].Index - 1; after < l.committed { - l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed) - } - l.unstable.truncateAndAppend(ents) - return l.lastIndex() -} - -// findConflict finds the index of the conflict. -// It returns the first pair of conflicting entries between the existing -// entries and the given entries, if there are any. -// If there is no conflicting entries, and the existing entries contains -// all the given entries, zero will be returned. -// If there is no conflicting entries, but the given entries contains new -// entries, the index of the first new entry will be returned. -// An entry is considered to be conflicting if it has the same index but -// a different term. -// The index of the given entries MUST be continuously increasing. -func (l *raftLog) findConflict(ents []pb.Entry) uint64 { - for _, ne := range ents { - if !l.matchTerm(ne.Index, ne.Term) { - if ne.Index <= l.lastIndex() { - l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]", - ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term) - } - return ne.Index - } - } - return 0 -} - -// findConflictByTerm takes an (index, term) pair (indicating a conflicting log -// entry on a leader/follower during an append) and finds the largest index in -// log l with a term <= `term` and an index <= `index`. If no such index exists -// in the log, the log's first index is returned. -// -// The index provided MUST be equal to or less than l.lastIndex(). Invalid -// inputs log a warning and the input index is returned. -func (l *raftLog) findConflictByTerm(index uint64, term uint64) uint64 { - if li := l.lastIndex(); index > li { - // NB: such calls should not exist, but since there is a straightfoward - // way to recover, do it. - // - // It is tempting to also check something about the first index, but - // there is odd behavior with peers that have no log, in which case - // lastIndex will return zero and firstIndex will return one, which - // leads to calls with an index of zero into this method. 
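The conflict rule documented in findConflict above (same index but different term conflicts; the first missing index counts as the first new entry) can be replayed on a toy log. A self-contained illustration of the rule, not the raftLog code itself:

```go
package main

import "fmt"

type entry struct{ Index, Term uint64 }

// firstConflict mirrors the documented rule: return the index of the first
// incoming entry whose term differs at the same index (or that does not
// exist yet); return 0 when the log already contains every incoming entry.
func firstConflict(existing map[uint64]uint64, incoming []entry) uint64 {
	for _, e := range incoming {
		if t, ok := existing[e.Index]; !ok || t != e.Term {
			return e.Index
		}
	}
	return 0
}

func main() {
	existing := map[uint64]uint64{1: 1, 2: 1, 3: 2} // index -> term
	fmt.Println(firstConflict(existing, []entry{{2, 1}, {3, 2}})) // 0: fully contained
	fmt.Println(firstConflict(existing, []entry{{3, 2}, {4, 2}})) // 4: first new entry
	fmt.Println(firstConflict(existing, []entry{{2, 1}, {3, 3}})) // 3: term mismatch
}
```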
- l.logger.Warningf("index(%d) is out of range [0, lastIndex(%d)] in findConflictByTerm", - index, li) - return index - } - for { - logTerm, err := l.term(index) - if logTerm <= term || err != nil { - break - } - index-- - } - return index -} - -func (l *raftLog) unstableEntries() []pb.Entry { - if len(l.unstable.entries) == 0 { - return nil - } - return l.unstable.entries -} - -// nextEnts returns all the available entries for execution. -// If applied is smaller than the index of snapshot, it returns all committed -// entries after the index of snapshot. -func (l *raftLog) nextEnts() (ents []pb.Entry) { - off := max(l.applied+1, l.firstIndex()) - if l.committed+1 > off { - ents, err := l.slice(off, l.committed+1, l.maxNextEntsSize) - if err != nil { - l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err) - } - return ents - } - return nil -} - -// hasNextEnts returns if there is any available entries for execution. This -// is a fast check without heavy raftLog.slice() in raftLog.nextEnts(). -func (l *raftLog) hasNextEnts() bool { - off := max(l.applied+1, l.firstIndex()) - return l.committed+1 > off -} - -// hasPendingSnapshot returns if there is pending snapshot waiting for applying. -func (l *raftLog) hasPendingSnapshot() bool { - return l.unstable.snapshot != nil && !IsEmptySnap(*l.unstable.snapshot) -} - -func (l *raftLog) snapshot() (pb.Snapshot, error) { - if l.unstable.snapshot != nil { - return *l.unstable.snapshot, nil - } - return l.storage.Snapshot() -} - -func (l *raftLog) firstIndex() uint64 { - if i, ok := l.unstable.maybeFirstIndex(); ok { - return i - } - index, err := l.storage.FirstIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - return index -} - -func (l *raftLog) lastIndex() uint64 { - if i, ok := l.unstable.maybeLastIndex(); ok { - return i - } - i, err := l.storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - return i -} - -func (l *raftLog) commitTo(tocommit uint64) { - // never decrease commit - if l.committed < tocommit { - if l.lastIndex() < tocommit { - l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex()) - } - l.committed = tocommit - } -} - -func (l *raftLog) appliedTo(i uint64) { - if i == 0 { - return - } - if l.committed < i || i < l.applied { - l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed) - } - l.applied = i -} - -func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) } - -func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) } - -func (l *raftLog) lastTerm() uint64 { - t, err := l.term(l.lastIndex()) - if err != nil { - l.logger.Panicf("unexpected error when getting the last term (%v)", err) - } - return t -} - -func (l *raftLog) term(i uint64) (uint64, error) { - // the valid term range is [index of dummy entry, last index] - dummyIndex := l.firstIndex() - 1 - if i < dummyIndex || i > l.lastIndex() { - // TODO: return an error instead? - return 0, nil - } - - if t, ok := l.unstable.maybeTerm(i); ok { - return t, nil - } - - t, err := l.storage.Term(i) - if err == nil { - return t, nil - } - if err == ErrCompacted || err == ErrUnavailable { - return 0, err - } - panic(err) // TODO(bdarnell) -} - -func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) { - if i > l.lastIndex() { - return nil, nil - } - return l.slice(i, l.lastIndex()+1, maxsize) -} - -// allEntries returns all entries in the log. 
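The deleted commitTo/appliedTo pair enforces applied <= committed <= lastIndex, panicking on any violation. A stripped-down sketch of just those invariants, detached from the raft types:

```go
package main

import "fmt"

type positions struct{ committed, applied, last uint64 }

// commitTo never decreases committed and refuses to pass lastIndex,
// matching the checks in the deleted raftLog code above.
func (p *positions) commitTo(tocommit uint64) {
	if p.committed < tocommit {
		if p.last < tocommit {
			panic(fmt.Sprintf("tocommit(%d) is out of range [lastIndex(%d)]", tocommit, p.last))
		}
		p.committed = tocommit
	}
}

// appliedTo keeps applied within [prevApplied, committed].
func (p *positions) appliedTo(i uint64) {
	if i == 0 {
		return
	}
	if p.committed < i || i < p.applied {
		panic(fmt.Sprintf("applied(%d) is out of range [prevApplied(%d), committed(%d)]",
			i, p.applied, p.committed))
	}
	p.applied = i
}

func main() {
	p := positions{last: 10}
	p.commitTo(7)
	p.appliedTo(7)
	fmt.Printf("committed=%d applied=%d\n", p.committed, p.applied)
}
```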
-func (l *raftLog) allEntries() []pb.Entry { - ents, err := l.entries(l.firstIndex(), noLimit) - if err == nil { - return ents - } - if err == ErrCompacted { // try again if there was a racing compaction - return l.allEntries() - } - // TODO (xiangli): handle error? - panic(err) -} - -// isUpToDate determines if the given (lastIndex,term) log is more up-to-date -// by comparing the index and term of the last entries in the existing logs. -// If the logs have last entries with different terms, then the log with the -// later term is more up-to-date. If the logs end with the same term, then -// whichever log has the larger lastIndex is more up-to-date. If the logs are -// the same, the given log is up-to-date. -func (l *raftLog) isUpToDate(lasti, term uint64) bool { - return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex()) -} - -func (l *raftLog) matchTerm(i, term uint64) bool { - t, err := l.term(i) - if err != nil { - return false - } - return t == term -} - -func (l *raftLog) maybeCommit(maxIndex, term uint64) bool { - if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term { - l.commitTo(maxIndex) - return true - } - return false -} - -func (l *raftLog) restore(s pb.Snapshot) { - l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term) - l.committed = s.Metadata.Index - l.unstable.restore(s) -} - -// slice returns a slice of log entries from lo through hi-1, inclusive. -func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { - err := l.mustCheckOutOfBounds(lo, hi) - if err != nil { - return nil, err - } - if lo == hi { - return nil, nil - } - var ents []pb.Entry - if lo < l.unstable.offset { - storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize) - if err == ErrCompacted { - return nil, err - } else if err == ErrUnavailable { - l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset)) - } else if err != nil { - panic(err) // TODO(bdarnell) - } - - // check if ents has reached the size limitation - if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo { - return storedEnts, nil - } - - ents = storedEnts - } - if hi > l.unstable.offset { - unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) - if len(ents) > 0 { - combined := make([]pb.Entry, len(ents)+len(unstable)) - n := copy(combined, ents) - copy(combined[n:], unstable) - ents = combined - } else { - ents = unstable - } - } - return limitSize(ents, maxSize), nil -} - -// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries) -func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error { - if lo > hi { - l.logger.Panicf("invalid slice %d > %d", lo, hi) - } - fi := l.firstIndex() - if lo < fi { - return ErrCompacted - } - - length := l.lastIndex() + 1 - fi - if hi > fi+length { - l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex()) - } - return nil -} - -func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 { - if err == nil { - return t - } - if err == ErrCompacted { - return 0 - } - l.logger.Panicf("unexpected error (%v)", err) - return 0 -} diff --git a/vendor/go.etcd.io/etcd/raft/v3/log_unstable.go b/vendor/go.etcd.io/etcd/raft/v3/log_unstable.go deleted file mode 100644 index 230fd21f99448..0000000000000 --- a/vendor/go.etcd.io/etcd/raft/v3/log_unstable.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// 
you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import pb "go.etcd.io/etcd/raft/v3/raftpb" - -// unstable.entries[i] has raft log position i+unstable.offset. -// Note that unstable.offset may be less than the highest log -// position in storage; this means that the next write to storage -// might need to truncate the log before persisting unstable.entries. -type unstable struct { - // the incoming unstable snapshot, if any. - snapshot *pb.Snapshot - // all entries that have not yet been written to storage. - entries []pb.Entry - offset uint64 - - logger Logger -} - -// maybeFirstIndex returns the index of the first possible entry in entries -// if it has a snapshot. -func (u *unstable) maybeFirstIndex() (uint64, bool) { - if u.snapshot != nil { - return u.snapshot.Metadata.Index + 1, true - } - return 0, false -} - -// maybeLastIndex returns the last index if it has at least one -// unstable entry or snapshot. -func (u *unstable) maybeLastIndex() (uint64, bool) { - if l := len(u.entries); l != 0 { - return u.offset + uint64(l) - 1, true - } - if u.snapshot != nil { - return u.snapshot.Metadata.Index, true - } - return 0, false -} - -// maybeTerm returns the term of the entry at index i, if there -// is any. -func (u *unstable) maybeTerm(i uint64) (uint64, bool) { - if i < u.offset { - if u.snapshot != nil && u.snapshot.Metadata.Index == i { - return u.snapshot.Metadata.Term, true - } - return 0, false - } - - last, ok := u.maybeLastIndex() - if !ok { - return 0, false - } - if i > last { - return 0, false - } - - return u.entries[i-u.offset].Term, true -} - -func (u *unstable) stableTo(i, t uint64) { - gt, ok := u.maybeTerm(i) - if !ok { - return - } - // if i < offset, term is matched with the snapshot - // only update the unstable entries if term is matched with - // an unstable entry. - if gt == t && i >= u.offset { - u.entries = u.entries[i+1-u.offset:] - u.offset = i + 1 - u.shrinkEntriesArray() - } -} - -// shrinkEntriesArray discards the underlying array used by the entries slice -// if most of it isn't being used. This avoids holding references to a bunch of -// potentially large entries that aren't needed anymore. Simply clearing the -// entries wouldn't be safe because clients might still be using them. -func (u *unstable) shrinkEntriesArray() { - // We replace the array if we're using less than half of the space in - // it. This number is fairly arbitrary, chosen as an attempt to balance - // memory usage vs number of allocations. It could probably be improved - // with some focused tuning. 
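shrinkEntriesArray, whose rationale is quoted above (its vendored body follows below), releases a mostly-idle backing array by copying into a right-sized slice. The idiom in isolation:

```go
package main

import "fmt"

// shrink returns a copy of s with minimal capacity when less than half of
// the backing array is in use, so the large array can be garbage collected
// while slices previously handed out to callers stay valid.
func shrink(s []int) []int {
	const lenMultiple = 2
	if len(s) == 0 {
		return nil
	}
	if len(s)*lenMultiple < cap(s) {
		out := make([]int, len(s))
		copy(out, s)
		return out
	}
	return s
}

func main() {
	big := make([]int, 1000)
	small := big[:3] // 3 live elements pinning a 1000-element array
	small = shrink(small)
	fmt.Println(len(small), cap(small)) // 3 3
}
```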
- const lenMultiple = 2 - if len(u.entries) == 0 { - u.entries = nil - } else if len(u.entries)*lenMultiple < cap(u.entries) { - newEntries := make([]pb.Entry, len(u.entries)) - copy(newEntries, u.entries) - u.entries = newEntries - } -} - -func (u *unstable) stableSnapTo(i uint64) { - if u.snapshot != nil && u.snapshot.Metadata.Index == i { - u.snapshot = nil - } -} - -func (u *unstable) restore(s pb.Snapshot) { - u.offset = s.Metadata.Index + 1 - u.entries = nil - u.snapshot = &s -} - -func (u *unstable) truncateAndAppend(ents []pb.Entry) { - after := ents[0].Index - switch { - case after == u.offset+uint64(len(u.entries)): - // after is the next index in the u.entries - // directly append - u.entries = append(u.entries, ents...) - case after <= u.offset: - u.logger.Infof("replace the unstable entries from index %d", after) - // The log is being truncated to before our current offset - // portion, so set the offset and replace the entries - u.offset = after - u.entries = ents - default: - // truncate to after and copy to u.entries - // then append - u.logger.Infof("truncate the unstable entries before index %d", after) - u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...) - u.entries = append(u.entries, ents...) - } -} - -func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry { - u.mustCheckOutOfBounds(lo, hi) - return u.entries[lo-u.offset : hi-u.offset] -} - -// u.offset <= lo <= hi <= u.offset+len(u.entries) -func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) { - if lo > hi { - u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi) - } - upper := u.offset + uint64(len(u.entries)) - if lo < u.offset || hi > upper { - u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper) - } -} diff --git a/vendor/go.etcd.io/etcd/raft/v3/rawnode.go b/vendor/go.etcd.io/etcd/raft/v3/rawnode.go deleted file mode 100644 index 4111d029dd600..0000000000000 --- a/vendor/go.etcd.io/etcd/raft/v3/rawnode.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "errors" - - pb "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/raft/v3/tracker" -) - -// ErrStepLocalMsg is returned when try to step a local raft message -var ErrStepLocalMsg = errors.New("raft: cannot step raft local message") - -// ErrStepPeerNotFound is returned when try to step a response message -// but there is no peer found in raft.prs for that node. -var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found") - -// RawNode is a thread-unsafe Node. -// The methods of this struct correspond to the methods of Node and are described -// more fully there. -type RawNode struct { - raft *raft - prevSoftSt *SoftState - prevHardSt pb.HardState -} - -// NewRawNode instantiates a RawNode from the given configuration. -// -// See Bootstrap() for bootstrapping an initial state; this replaces the former -// 'peers' argument to this method (with identical behavior). 
However, It is -// recommended that instead of calling Bootstrap, applications bootstrap their -// state manually by setting up a Storage that has a first index > 1 and which -// stores the desired ConfState as its InitialState. -func NewRawNode(config *Config) (*RawNode, error) { - r := newRaft(config) - rn := &RawNode{ - raft: r, - } - rn.prevSoftSt = r.softState() - rn.prevHardSt = r.hardState() - return rn, nil -} - -// Tick advances the internal logical clock by a single tick. -func (rn *RawNode) Tick() { - rn.raft.tick() -} - -// TickQuiesced advances the internal logical clock by a single tick without -// performing any other state machine processing. It allows the caller to avoid -// periodic heartbeats and elections when all of the peers in a Raft group are -// known to be at the same state. Expected usage is to periodically invoke Tick -// or TickQuiesced depending on whether the group is "active" or "quiesced". -// -// WARNING: Be very careful about using this method as it subverts the Raft -// state machine. You should probably be using Tick instead. -func (rn *RawNode) TickQuiesced() { - rn.raft.electionElapsed++ -} - -// Campaign causes this RawNode to transition to candidate state. -func (rn *RawNode) Campaign() error { - return rn.raft.Step(pb.Message{ - Type: pb.MsgHup, - }) -} - -// Propose proposes data be appended to the raft log. -func (rn *RawNode) Propose(data []byte) error { - return rn.raft.Step(pb.Message{ - Type: pb.MsgProp, - From: rn.raft.id, - Entries: []pb.Entry{ - {Data: data}, - }}) -} - -// ProposeConfChange proposes a config change. See (Node).ProposeConfChange for -// details. -func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error { - m, err := confChangeToMsg(cc) - if err != nil { - return err - } - return rn.raft.Step(m) -} - -// ApplyConfChange applies a config change to the local node. The app must call -// this when it applies a configuration change, except when it decides to reject -// the configuration change, in which case no call must take place. -func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState { - cs := rn.raft.applyConfChange(cc.AsV2()) - return &cs -} - -// Step advances the state machine using the given message. -func (rn *RawNode) Step(m pb.Message) error { - // ignore unexpected local messages receiving over network - if IsLocalMsg(m.Type) { - return ErrStepLocalMsg - } - if pr := rn.raft.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) { - return rn.raft.Step(m) - } - return ErrStepPeerNotFound -} - -// Ready returns the outstanding work that the application needs to handle. This -// includes appending and applying entries or a snapshot, updating the HardState, -// and sending messages. The returned Ready() *must* be handled and subsequently -// passed back via Advance(). -func (rn *RawNode) Ready() Ready { - rd := rn.readyWithoutAccept() - rn.acceptReady(rd) - return rd -} - -// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there -// is no obligation that the Ready must be handled. -func (rn *RawNode) readyWithoutAccept() Ready { - return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) -} - -// acceptReady is called when the consumer of the RawNode has decided to go -// ahead and handle a Ready. Nothing must alter the state of the RawNode between -// this call and the prior call to Ready(). 
-func (rn *RawNode) acceptReady(rd Ready) { - if rd.SoftState != nil { - rn.prevSoftSt = rd.SoftState - } - if len(rd.ReadStates) != 0 { - rn.raft.readStates = nil - } - rn.raft.msgs = nil -} - -// HasReady called when RawNode user need to check if any Ready pending. -// Checking logic in this method should be consistent with Ready.containsUpdates(). -func (rn *RawNode) HasReady() bool { - r := rn.raft - if !r.softState().equal(rn.prevSoftSt) { - return true - } - if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) { - return true - } - if r.raftLog.hasPendingSnapshot() { - return true - } - if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() { - return true - } - if len(r.readStates) != 0 { - return true - } - return false -} - -// Advance notifies the RawNode that the application has applied and saved progress in the -// last Ready results. -func (rn *RawNode) Advance(rd Ready) { - if !IsEmptyHardState(rd.HardState) { - rn.prevHardSt = rd.HardState - } - rn.raft.advance(rd) -} - -// Status returns the current status of the given group. This allocates, see -// BasicStatus and WithProgress for allocation-friendlier choices. -func (rn *RawNode) Status() Status { - status := getStatus(rn.raft) - return status -} - -// BasicStatus returns a BasicStatus. Notably this does not contain the -// Progress map; see WithProgress for an allocation-free way to inspect it. -func (rn *RawNode) BasicStatus() BasicStatus { - return getBasicStatus(rn.raft) -} - -// ProgressType indicates the type of replica a Progress corresponds to. -type ProgressType byte - -const ( - // ProgressTypePeer accompanies a Progress for a regular peer replica. - ProgressTypePeer ProgressType = iota - // ProgressTypeLearner accompanies a Progress for a learner replica. - ProgressTypeLearner -) - -// WithProgress is a helper to introspect the Progress for this node and its -// peers. -func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) { - rn.raft.prs.Visit(func(id uint64, pr *tracker.Progress) { - typ := ProgressTypePeer - if pr.IsLearner { - typ = ProgressTypeLearner - } - p := *pr - p.Inflights = nil - visitor(id, typ, p) - }) -} - -// ReportUnreachable reports the given node is not reachable for the last send. -func (rn *RawNode) ReportUnreachable(id uint64) { - _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id}) -} - -// ReportSnapshot reports the status of the sent snapshot. -func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) { - rej := status == SnapshotFailure - - _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}) -} - -// TransferLeader tries to transfer leadership to the given transferee. -func (rn *RawNode) TransferLeader(transferee uint64) { - _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee}) -} - -// ReadIndex requests a read state. The read state will be set in ready. -// Read State has a read index. Once the application advances further than the read -// index, any linearizable read requests issued before the read request can be -// processed safely. The read state will have the same rctx attached. 
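Taken together, HasReady/Ready/Advance above imply the canonical driving loop for a RawNode: persist, send, apply, then Advance. A schematic sketch under that contract; the three callbacks and the wiring of a real node (Config, Storage) are application-provided stand-ins:

```go
package main

import (
	"time"

	"go.etcd.io/etcd/raft/v3"
	pb "go.etcd.io/etcd/raft/v3/raftpb"
)

// driveLoop sketches the obligation stated above: a Ready returned by
// rn.Ready() must be fully handled and then reported via rn.Advance().
func driveLoop(
	rn *raft.RawNode,
	tick *time.Ticker,
	persist func(pb.HardState, []pb.Entry, pb.Snapshot),
	send func([]pb.Message),
	apply func(pb.Entry),
) {
	for {
		<-tick.C
		rn.Tick()
		if !rn.HasReady() {
			continue
		}
		rd := rn.Ready() // accepts the Ready; it must now be handled
		persist(rd.HardState, rd.Entries, rd.Snapshot)
		send(rd.Messages)
		for _, ent := range rd.CommittedEntries {
			apply(ent)
		}
		rn.Advance(rd) // report the Ready as fully processed
	}
}

func main() {} // illustrative only; a real node also needs Storage, Config, transport
```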
-func (rn *RawNode) ReadIndex(rctx []byte) { - _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) -} diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go b/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go index a797794494bd8..18890de9c0b70 100644 --- a/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go +++ b/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go @@ -17,18 +17,19 @@ package auth import ( "context" "crypto/ecdsa" + "crypto/ed25519" "crypto/rsa" "errors" "time" - "github.com/golang-jwt/jwt/v4" + jwt "github.com/golang-jwt/jwt/v4" "go.uber.org/zap" ) type tokenJWT struct { lg *zap.Logger signMethod jwt.SigningMethod - key interface{} + key any ttl time.Duration verifyOnly bool } @@ -45,7 +46,7 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf revision float64 ) - parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { + parsed, err := jwt.Parse(token, func(token *jwt.Token) (any, error) { if token.Method.Alg() != t.signMethod.Alg() { return nil, errors.New("invalid signing method") } @@ -54,6 +55,8 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf return &k.PublicKey, nil case *ecdsa.PrivateKey: return &k.PublicKey, nil + case ed25519.PrivateKey: + return k.Public(), nil default: return t.key, nil } @@ -62,7 +65,6 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf if err != nil { t.lg.Warn( "failed to parse a JWT token", - zap.String("token", token), zap.Error(err), ) return nil, false @@ -70,7 +72,7 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf claims, ok := parsed.Claims.(jwt.MapClaims) if !parsed.Valid || !ok { - t.lg.Warn("invalid JWT token", zap.String("token", token)) + t.lg.Warn("failed to obtain claims from a JWT token") return nil, false } @@ -162,6 +164,10 @@ func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, e if _, ok := t.key.(*ecdsa.PublicKey); ok { t.verifyOnly = true } + case *jwt.SigningMethodEd25519: + if _, ok := t.key.(ed25519.PublicKey); ok { + t.verifyOnly = true + } case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS: if _, ok := t.key.(*rsa.PublicKey); ok { t.verifyOnly = true diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/options.go b/vendor/go.etcd.io/etcd/server/v3/auth/options.go index 0b01d2c4a31d5..7ed4bf2f00227 100644 --- a/vendor/go.etcd.io/etcd/server/v3/auth/options.go +++ b/vendor/go.etcd.io/etcd/server/v3/auth/options.go @@ -15,13 +15,15 @@ package auth import ( + "crypto" "crypto/ecdsa" + "crypto/ed25519" "crypto/rsa" "fmt" - "io/ioutil" + "os" "time" - "github.com/golang-jwt/jwt/v4" + jwt "github.com/golang-jwt/jwt/v4" ) const ( @@ -70,14 +72,14 @@ func (opts *jwtOptions) Parse(optMap map[string]string) error { } if file := optMap[optPublicKey]; file != "" { - opts.PublicKey, err = ioutil.ReadFile(file) + opts.PublicKey, err = os.ReadFile(file) if err != nil { return err } } if file := optMap[optPrivateKey]; file != "" { - opts.PrivateKey, err = ioutil.ReadFile(file) + opts.PrivateKey, err = os.ReadFile(file) if err != nil { return err } @@ -94,12 +96,14 @@ func (opts *jwtOptions) Parse(optMap map[string]string) error { } // Key will parse and return the appropriately typed key for the selected signature method -func (opts *jwtOptions) Key() (interface{}, error) { +func (opts *jwtOptions) Key() (any, error) { switch opts.SignMethod.(type) { case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS: return opts.rsaKey() case 
*jwt.SigningMethodECDSA: return opts.ecKey() + case *jwt.SigningMethodEd25519: + return opts.edKey() case *jwt.SigningMethodHMAC: return opts.hmacKey() default: @@ -107,14 +111,14 @@ func (opts *jwtOptions) Key() (interface{}, error) { } } -func (opts *jwtOptions) hmacKey() (interface{}, error) { +func (opts *jwtOptions) hmacKey() (any, error) { if len(opts.PrivateKey) == 0 { return nil, ErrMissingKey } return opts.PrivateKey, nil } -func (opts *jwtOptions) rsaKey() (interface{}, error) { +func (opts *jwtOptions) rsaKey() (any, error) { var ( priv *rsa.PrivateKey pub *rsa.PublicKey @@ -145,14 +149,14 @@ func (opts *jwtOptions) rsaKey() (interface{}, error) { } // both keys provided, make sure they match - if pub != nil && pub.E != priv.E && pub.N.Cmp(priv.N) != 0 { + if pub != nil && !pub.Equal(priv.Public()) { return nil, ErrKeyMismatch } return priv, nil } -func (opts *jwtOptions) ecKey() (interface{}, error) { +func (opts *jwtOptions) ecKey() (any, error) { var ( priv *ecdsa.PrivateKey pub *ecdsa.PublicKey @@ -183,8 +187,49 @@ func (opts *jwtOptions) ecKey() (interface{}, error) { } // both keys provided, make sure they match - if pub != nil && pub.Curve != priv.Curve && - pub.X.Cmp(priv.X) != 0 && pub.Y.Cmp(priv.Y) != 0 { + if pub != nil && !pub.Equal(priv.Public()) { + return nil, ErrKeyMismatch + } + + return priv, nil +} + +func (opts *jwtOptions) edKey() (any, error) { + var ( + priv ed25519.PrivateKey + pub ed25519.PublicKey + err error + ) + + if len(opts.PrivateKey) > 0 { + var privKey crypto.PrivateKey + privKey, err = jwt.ParseEdPrivateKeyFromPEM(opts.PrivateKey) + if err != nil { + return nil, err + } + priv = privKey.(ed25519.PrivateKey) + } + + if len(opts.PublicKey) > 0 { + var pubKey crypto.PublicKey + pubKey, err = jwt.ParseEdPublicKeyFromPEM(opts.PublicKey) + if err != nil { + return nil, err + } + pub = pubKey.(ed25519.PublicKey) + } + + if priv == nil { + if pub == nil { + // Neither key given + return nil, ErrMissingKey + } + // Public key only, can verify tokens + return pub, nil + } + + // both keys provided, make sure they match + if pub != nil && !pub.Equal(priv.Public()) { return nil, ErrKeyMismatch } diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go b/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go index e096710c32e9e..a540e121d0d1a 100644 --- a/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go +++ b/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go @@ -15,15 +15,14 @@ package auth import ( + "go.uber.org/zap" + "go.etcd.io/etcd/api/v3/authpb" "go.etcd.io/etcd/pkg/v3/adt" - "go.etcd.io/etcd/server/v3/mvcc/backend" - - "go.uber.org/zap" ) -func getMergedPerms(lg *zap.Logger, tx backend.ReadTx, userName string) *unifiedRangePermissions { - user := getUser(lg, tx, userName) +func getMergedPerms(tx UnsafeAuthReader, userName string) *unifiedRangePermissions { + user := tx.UnsafeGetUser(userName) if user == nil { return nil } @@ -32,7 +31,7 @@ func getMergedPerms(lg *zap.Logger, tx backend.ReadTx, userName string) *unified writePerms := adt.NewIntervalTree() for _, roleName := range user.Roles { - role := getRole(lg, tx, roleName) + role := tx.UnsafeGetRole(roleName) if role == nil { continue } @@ -108,6 +107,7 @@ func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []b } func (as *authStore) isRangeOpPermitted(userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { + // assumption: tx is Lock()ed as.rangePermCacheMu.RLock() defer as.rangePermCacheMu.RUnlock() @@ -127,19 
+127,21 @@ func (as *authStore) isRangeOpPermitted(userName string, key, rangeEnd []byte, p return checkKeyInterval(as.lg, rangePerm, key, rangeEnd, permtyp) } -func (as *authStore) refreshRangePermCache(tx backend.ReadTx) { +func (as *authStore) refreshRangePermCache(tx UnsafeAuthReader) { // Note that every authentication configuration update calls this method and it invalidates the entire // rangePermCache and reconstruct it based on information of users and roles stored in the backend. // This can be a costly operation. as.rangePermCacheMu.Lock() defer as.rangePermCacheMu.Unlock() + as.lg.Debug("Refreshing rangePermCache") + as.rangePermCache = make(map[string]*unifiedRangePermissions) - users := getAllUsers(as.lg, tx) + users := tx.UnsafeGetAllUsers() for _, user := range users { userName := string(user.Name) - perms := getMergedPerms(as.lg, tx, userName) + perms := getMergedPerms(tx, userName) if perms == nil { as.lg.Error( "failed to create a merged permission", @@ -187,7 +189,7 @@ func isValidPermissionRange(key, rangeEnd []byte) bool { if len(key) == 0 { return false } - if rangeEnd == nil || len(rangeEnd) == 0 { // ensure rule b1 + if len(rangeEnd) == 0 { // ensure rule b1 return true } @@ -197,9 +199,5 @@ func isValidPermissionRange(key, rangeEnd []byte) bool { return true } - if isOpenEnded(rangeEnd) { - return true - } - - return false + return isOpenEnded(rangeEnd) } diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go b/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go index a657d9daefc87..f8272b185d947 100644 --- a/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go +++ b/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go @@ -20,6 +20,7 @@ package auth import ( "context" "crypto/rand" + "errors" "fmt" "math/big" "strconv" @@ -212,7 +213,11 @@ func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) ( func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { // rev isn't used in simple token, it is only used in JWT - index := ctx.Value(AuthenticateParamIndex{}).(uint64) + var index uint64 + var ok bool + if index, ok = ctx.Value(AuthenticateParamIndex{}).(uint64); !ok { + return "", errors.New("failed to assign") + } simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string) token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index) t.assignSimpleTokenToUser(username, token) @@ -231,7 +236,7 @@ func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool } select { - case <-t.indexWaiter(uint64(index)): + case <-t.indexWaiter(index): return true case <-ctx.Done(): } diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/store.go b/vendor/go.etcd.io/etcd/server/v3/auth/store.go index 67c5694cde31d..387aa560494c7 100644 --- a/vendor/go.etcd.io/etcd/server/v3/auth/store.go +++ b/vendor/go.etcd.io/etcd/server/v3/auth/store.go @@ -18,7 +18,6 @@ import ( "bytes" "context" "encoding/base64" - "encoding/binary" "errors" "sort" "strings" @@ -26,25 +25,19 @@ import ( "sync/atomic" "time" - "go.etcd.io/etcd/api/v3/authpb" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" - "go.uber.org/zap" "golang.org/x/crypto/bcrypt" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + + "go.etcd.io/etcd/api/v3/authpb" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) var ( - 
enableFlagKey = []byte("authEnabled") - authEnabled = []byte{1} - authDisabled = []byte{0} - - revisionKey = []byte("authRevision") + rootPerm = authpb.Permission{PermType: authpb.READWRITE, Key: []byte{}, RangeEnd: []byte{0}} ErrRootUserNotExist = errors.New("auth: root user does not exist") ErrRootRoleNotExist = errors.New("auth: root user does not have root role") @@ -77,8 +70,6 @@ const ( tokenTypeSimple = "simple" tokenTypeJWT = "jwt" - - revBytesLen = 8 ) type AuthInfo struct { @@ -107,7 +98,7 @@ type AuthStore interface { Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) // Recover recovers the state of auth store from the given backend - Recover(b backend.Backend) + Recover(be AuthBackend) // UserAdd adds a new user UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) @@ -199,12 +190,59 @@ type TokenProvider interface { genTokenPrefix() (string, error) } +type AuthBackend interface { + CreateAuthBuckets() + ForceCommit() + ReadTx() AuthReadTx + BatchTx() AuthBatchTx + + GetUser(string) *authpb.User + GetAllUsers() []*authpb.User + GetRole(string) *authpb.Role + GetAllRoles() []*authpb.Role +} + +type AuthReadTx interface { + RLock() + RUnlock() + UnsafeAuthReader +} + +type UnsafeAuthReader interface { + UnsafeReadAuthEnabled() bool + UnsafeReadAuthRevision() uint64 + UnsafeGetUser(string) *authpb.User + UnsafeGetRole(string) *authpb.Role + UnsafeGetAllUsers() []*authpb.User + UnsafeGetAllRoles() []*authpb.Role +} + +type AuthBatchTx interface { + Lock() + Unlock() + UnsafeAuthReadWriter +} + +type UnsafeAuthReadWriter interface { + UnsafeAuthReader + UnsafeAuthWriter +} + +type UnsafeAuthWriter interface { + UnsafeSaveAuthEnabled(enabled bool) + UnsafeSaveAuthRevision(rev uint64) + UnsafePutUser(*authpb.User) + UnsafeDeleteUser(string) + UnsafePutRole(*authpb.Role) + UnsafeDeleteRole(string) +} + type authStore struct { // atomic operations; need 64-bit align, or 32-bit tests will crash revision uint64 lg *zap.Logger - be backend.Backend + be AuthBackend enabled bool enabledMu sync.RWMutex @@ -228,15 +266,14 @@ func (as *authStore) AuthEnable() error { as.lg.Info("authentication is already enabled; ignored auth enable request") return nil } - b := as.be - tx := b.BatchTx() - tx.LockInsideApply() + tx := as.be.BatchTx() + tx.Lock() defer func() { tx.Unlock() - b.ForceCommit() + as.be.ForceCommit() }() - u := getUser(as.lg, tx, rootUser) + u := tx.UnsafeGetUser(rootUser) if u == nil { return ErrRootUserNotExist } @@ -245,14 +282,13 @@ func (as *authStore) AuthEnable() error { return ErrRootRoleNotExist } - tx.UnsafePut(buckets.Auth, enableFlagKey, authEnabled) - + tx.UnsafeSaveAuthEnabled(true) as.enabled = true as.tokenProvider.enable() as.refreshRangePermCache(tx) - as.setRevision(getRevision(tx)) + as.setRevision(tx.UnsafeReadAuthRevision()) as.lg.Info("enabled authentication") return nil @@ -265,11 +301,13 @@ func (as *authStore) AuthDisable() { return } b := as.be + tx := b.BatchTx() - tx.LockInsideApply() - tx.UnsafePut(buckets.Auth, enableFlagKey, authDisabled) + tx.Lock() + tx.UnsafeSaveAuthEnabled(false) as.commitRevision(tx) tx.Unlock() + b.ForceCommit() as.enabled = false @@ -292,12 +330,7 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string if !as.IsAuthEnabled() { return nil, ErrAuthNotEnabled } - - tx := as.be.BatchTx() - tx.LockInsideApply() - defer tx.Unlock() - - user := getUser(as.lg, tx, username) + user := as.be.GetUser(username) if user == nil { return nil, ErrAuthFailed } 
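The AuthBackend/UnsafeAuthReader split above decouples the auth store from the bolt-backed storage schema, which also makes it mockable. As an illustration of that seam (not etcd code), a toy in-memory reader satisfying the interface declared above:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/api/v3/authpb"
	"go.etcd.io/etcd/server/v3/auth"
)

// memReader is a toy UnsafeAuthReader over plain maps.
type memReader struct {
	enabled bool
	rev     uint64
	users   map[string]*authpb.User
	roles   map[string]*authpb.Role
}

var _ auth.UnsafeAuthReader = (*memReader)(nil)

func (m *memReader) UnsafeReadAuthEnabled() bool          { return m.enabled }
func (m *memReader) UnsafeReadAuthRevision() uint64       { return m.rev }
func (m *memReader) UnsafeGetUser(s string) *authpb.User  { return m.users[s] }
func (m *memReader) UnsafeGetRole(s string) *authpb.Role  { return m.roles[s] }

func (m *memReader) UnsafeGetAllUsers() []*authpb.User {
	us := make([]*authpb.User, 0, len(m.users))
	for _, u := range m.users {
		us = append(us, u)
	}
	return us
}

func (m *memReader) UnsafeGetAllRoles() []*authpb.Role {
	rs := make([]*authpb.Role, 0, len(m.roles))
	for _, r := range m.roles {
		rs = append(rs, r)
	}
	return rs
}

func main() {
	r := &memReader{
		enabled: true,
		rev:     1,
		users:   map[string]*authpb.User{"root": {Name: []byte("root"), Roles: []string{"root"}}},
		roles:   map[string]*authpb.Role{},
	}
	fmt.Println(r.UnsafeReadAuthEnabled(), r.UnsafeGetUser("root").Roles)
}
```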
@@ -332,10 +365,10 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) { // to avoid putting it in the critical section of the tx lock. revision, err := func() (uint64, error) { tx := as.be.ReadTx() - tx.Lock() - defer tx.Unlock() + tx.RLock() + defer tx.RUnlock() - user = getUser(as.lg, tx, username) + user = tx.UnsafeGetUser(username) if user == nil { return 0, ErrAuthFailed } @@ -344,7 +377,7 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) { return 0, ErrNoPasswordUser } - return getRevision(tx), nil + return tx.UnsafeReadAuthRevision(), nil }() if err != nil { return 0, err @@ -357,22 +390,16 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) { return revision, nil } -func (as *authStore) Recover(be backend.Backend) { - enabled := false +func (as *authStore) Recover(be AuthBackend) { as.be = be tx := be.ReadTx() - tx.Lock() - _, vs := tx.UnsafeRange(buckets.Auth, enableFlagKey, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - enabled = true - } - } + tx.RLock() - as.setRevision(getRevision(tx)) + enabled := tx.UnsafeReadAuthEnabled() + as.setRevision(tx.UnsafeReadAuthRevision()) as.refreshRangePermCache(tx) - tx.Unlock() + tx.RUnlock() as.enabledMu.Lock() as.enabled = enabled @@ -396,10 +423,10 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, } tx := as.be.BatchTx() - tx.LockInsideApply() + tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, r.Name) + user := tx.UnsafeGetUser(r.Name) if user != nil { return nil, ErrUserAlreadyExist } @@ -426,8 +453,7 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, Password: password, Options: options, } - - putUser(as.lg, tx, newUser) + tx.UnsafePutUser(newUser) as.commitRevision(tx) as.refreshRangePermCache(tx) @@ -443,15 +469,14 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete } tx := as.be.BatchTx() - tx.LockInsideApply() + tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, r.Name) + user := tx.UnsafeGetUser(r.Name) if user == nil { return nil, ErrUserNotFound } - - delUser(tx, r.Name) + tx.UnsafeDeleteUser(r.Name) as.commitRevision(tx) as.refreshRangePermCache(tx) @@ -468,10 +493,10 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { tx := as.be.BatchTx() - tx.LockInsideApply() + tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, r.Name) + user := tx.UnsafeGetUser(r.Name) if user == nil { return nil, ErrUserNotFound } @@ -493,8 +518,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p Password: password, Options: user.Options, } - - putUser(as.lg, tx, updatedUser) + tx.UnsafePutUser(updatedUser) as.commitRevision(tx) as.refreshRangePermCache(tx) @@ -511,16 +535,16 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { tx := as.be.BatchTx() - tx.LockInsideApply() + tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, r.User) + user := tx.UnsafeGetUser(r.User) if user == nil { return nil, ErrUserNotFound } if r.Role != rootRole { - role := getRole(as.lg, tx, r.Role) + role := tx.UnsafeGetRole(r.Role) if role == nil { return nil, ErrRoleNotFound } @@ -540,7 +564,7 @@ func (as *authStore) 
UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser user.Roles = append(user.Roles, r.Role) sort.Strings(user.Roles) - putUser(as.lg, tx, user) + tx.UnsafePutUser(user) as.commitRevision(tx) as.refreshRangePermCache(tx) @@ -555,10 +579,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser } func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - tx := as.be.BatchTx() - tx.LockInsideApply() - user := getUser(as.lg, tx, r.Name) - tx.Unlock() + user := as.be.GetUser(r.Name) if user == nil { return nil, ErrUserNotFound @@ -570,10 +591,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, } func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - tx := as.be.BatchTx() - tx.LockInsideApply() - users := getAllUsers(as.lg, tx) - tx.Unlock() + users := as.be.GetAllUsers() resp := &pb.AuthUserListResponse{Users: make([]string, len(users))} for i := range users { @@ -593,10 +611,10 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs } tx := as.be.BatchTx() - tx.LockInsideApply() + tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, r.Name) + user := tx.UnsafeGetUser(r.Name) if user == nil { return nil, ErrUserNotFound } @@ -617,7 +635,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs return nil, ErrRoleNotGranted } - putUser(as.lg, tx, updatedUser) + tx.UnsafePutUser(updatedUser) as.commitRevision(tx) as.refreshRangePermCache(tx) @@ -633,25 +651,22 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs } func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - tx := as.be.BatchTx() - tx.LockInsideApply() - defer tx.Unlock() - var resp pb.AuthRoleGetResponse - role := getRole(as.lg, tx, r.Role) + role := as.be.GetRole(r.Role) if role == nil { return nil, ErrRoleNotFound } - resp.Perm = append(resp.Perm, role.KeyPermission...) + if rootRole == string(role.Name) { + resp.Perm = append(resp.Perm, &rootPerm) + } else { + resp.Perm = append(resp.Perm, role.KeyPermission...) 
+ } return &resp, nil } func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - tx := as.be.BatchTx() - tx.LockInsideApply() - roles := getAllRoles(as.lg, tx) - tx.Unlock() + roles := as.be.GetAllRoles() resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))} for i := range roles { @@ -662,10 +677,10 @@ func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListRespon func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { tx := as.be.BatchTx() - tx.LockInsideApply() + tx.Lock() defer tx.Unlock() - role := getRole(as.lg, tx, r.Role) + role := tx.UnsafeGetRole(r.Role) if role == nil { return nil, ErrRoleNotFound } @@ -684,7 +699,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) return nil, ErrPermissionNotGranted } - putRole(as.lg, tx, updatedRole) + tx.UnsafePutRole(updatedRole) as.commitRevision(tx) as.refreshRangePermCache(tx) @@ -705,17 +720,17 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete } tx := as.be.BatchTx() - tx.LockInsideApply() + tx.Lock() defer tx.Unlock() - role := getRole(as.lg, tx, r.Role) + role := tx.UnsafeGetRole(r.Role) if role == nil { return nil, ErrRoleNotFound } - delRole(tx, r.Role) + tx.UnsafeDeleteRole(r.Role) - users := getAllUsers(as.lg, tx) + users := tx.UnsafeGetAllUsers() for _, user := range users { updatedUser := &authpb.User{ Name: user.Name, @@ -733,8 +748,7 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete continue } - putUser(as.lg, tx, updatedUser) - + tx.UnsafePutUser(updatedUser) } as.commitRevision(tx) @@ -750,10 +764,10 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, } tx := as.be.BatchTx() - tx.LockInsideApply() + tx.Lock() defer tx.Unlock() - role := getRole(as.lg, tx, r.Name) + role := tx.UnsafeGetRole(r.Name) if role != nil { return nil, ErrRoleAlreadyExist } @@ -762,7 +776,7 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, Name: []byte(r.Name), } - putRole(as.lg, tx, newRole) + tx.UnsafePutRole(newRole) as.commitRevision(tx) @@ -797,10 +811,10 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( } tx := as.be.BatchTx() - tx.LockInsideApply() + tx.Lock() defer tx.Unlock() - role := getRole(as.lg, tx, r.Name) + role := tx.UnsafeGetRole(r.Name) if role == nil { return nil, ErrRoleNotFound } @@ -824,7 +838,7 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( sort.Sort(permSlice(role.KeyPermission)) } - putRole(as.lg, tx, role) + tx.UnsafePutRole(role) as.commitRevision(tx) as.refreshRangePermCache(tx) @@ -833,6 +847,8 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( "granted/updated a permission to a user", zap.String("user-name", r.Name), zap.String("permission-name", authpb.Permission_Type_name[int32(r.Perm.PermType)]), + zap.ByteString("key", r.Perm.Key), + zap.ByteString("range-end", r.Perm.RangeEnd), ) return &pb.AuthRoleGrantPermissionResponse{}, nil } @@ -858,10 +874,10 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE } tx := as.be.ReadTx() - tx.Lock() - defer tx.Unlock() + tx.RLock() + defer tx.RUnlock() - user := getUser(as.lg, tx, userName) + user := tx.UnsafeGetUser(userName) if user == nil { as.lg.Error("cannot find a user for permission check", zap.String("user-name", userName)) return 
ErrPermissionDenied @@ -900,9 +916,9 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { } tx := as.be.ReadTx() - tx.Lock() - u := getUser(as.lg, tx, authInfo.Username) - tx.Unlock() + tx.RLock() + defer tx.RUnlock() + u := tx.UnsafeGetUser(authInfo.Username) if u == nil { return ErrUserNotFound @@ -915,111 +931,6 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { return nil } -func getUser(lg *zap.Logger, tx backend.ReadTx, username string) *authpb.User { - _, vs := tx.UnsafeRange(buckets.AuthUsers, []byte(username), nil, 0) - if len(vs) == 0 { - return nil - } - - user := &authpb.User{} - err := user.Unmarshal(vs[0]) - if err != nil { - lg.Panic( - "failed to unmarshal 'authpb.User'", - zap.String("user-name", username), - zap.Error(err), - ) - } - return user -} - -func getAllUsers(lg *zap.Logger, tx backend.ReadTx) []*authpb.User { - var vs [][]byte - err := tx.UnsafeForEach(buckets.AuthUsers, func(k []byte, v []byte) error { - vs = append(vs, v) - return nil - }) - if err != nil { - lg.Panic("failed to get users", - zap.Error(err)) - } - if len(vs) == 0 { - return nil - } - - users := make([]*authpb.User, len(vs)) - for i := range vs { - user := &authpb.User{} - err := user.Unmarshal(vs[i]) - if err != nil { - lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) - } - users[i] = user - } - return users -} - -func putUser(lg *zap.Logger, tx backend.BatchTx, user *authpb.User) { - b, err := user.Marshal() - if err != nil { - lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) - } - tx.UnsafePut(buckets.AuthUsers, user.Name, b) -} - -func delUser(tx backend.BatchTx, username string) { - tx.UnsafeDelete(buckets.AuthUsers, []byte(username)) -} - -func getRole(lg *zap.Logger, tx backend.ReadTx, rolename string) *authpb.Role { - _, vs := tx.UnsafeRange(buckets.AuthRoles, []byte(rolename), nil, 0) - if len(vs) == 0 { - return nil - } - - role := &authpb.Role{} - err := role.Unmarshal(vs[0]) - if err != nil { - lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) - } - return role -} - -func getAllRoles(lg *zap.Logger, tx backend.ReadTx) []*authpb.Role { - _, vs := tx.UnsafeRange(buckets.AuthRoles, []byte{0}, []byte{0xff}, -1) - if len(vs) == 0 { - return nil - } - - roles := make([]*authpb.Role, len(vs)) - for i := range vs { - role := &authpb.Role{} - err := role.Unmarshal(vs[i]) - if err != nil { - lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) - } - roles[i] = role - } - return roles -} - -func putRole(lg *zap.Logger, tx backend.BatchTx, role *authpb.Role) { - b, err := role.Marshal() - if err != nil { - lg.Panic( - "failed to marshal 'authpb.Role'", - zap.String("role-name", string(role.Name)), - zap.Error(err), - ) - } - - tx.UnsafePut(buckets.AuthRoles, role.Name, b) -} - -func delRole(tx backend.BatchTx, rolename string) { - tx.UnsafeDelete(buckets.AuthRoles, []byte(rolename)) -} - func (as *authStore) IsAuthEnabled() bool { as.enabledMu.RLock() defer as.enabledMu.RUnlock() @@ -1027,7 +938,7 @@ func (as *authStore) IsAuthEnabled() bool { } // NewAuthStore creates a new AuthStore. 
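The helper functions deleted above (getUser, getAllUsers, putUser, delUser, getRole, getAllRoles, putRole, delRole) are folded into the storage layer: callers now go through an auth backend and its transactions instead of touching the auth buckets directly. A minimal sketch of what that surface looks like, reconstructed only from the method names visible in this diff (the real definitions live in etcd's auth and storage schema packages, and User/Role stand in for authpb.User and authpb.Role):

package authsketch

// User and Role are stand-ins for authpb.User and authpb.Role.
type User struct {
	Name     []byte
	Password []byte
	Roles    []string
}

type Role struct {
	Name []byte
}

// UnsafeAuthReader gathers the reads this diff performs while a transaction
// lock is already held (hence the Unsafe prefix).
type UnsafeAuthReader interface {
	UnsafeReadAuthEnabled() bool
	UnsafeReadAuthRevision() uint64
	UnsafeGetUser(username string) *User
	UnsafeGetAllUsers() []*User
	UnsafeGetRole(rolename string) *Role
}

// UnsafeAuthWriter adds the mutations used on the apply path; commitRevision
// now takes this interface instead of a raw backend.BatchTx.
type UnsafeAuthWriter interface {
	UnsafeAuthReader
	UnsafeSaveAuthRevision(rev uint64)
	UnsafePutUser(user *User)
	UnsafePutRole(role *Role)
	UnsafeDeleteRole(rolename string)
}

// AuthBackend is the narrowed dependency NewAuthStore receives below: bucket
// creation plus lock-free accessors for the read-only RPCs (UserGet,
// UserList, RoleGet, RoleList).
type AuthBackend interface {
	CreateAuthBuckets()
	GetUser(username string) *User
	GetAllUsers() []*User
	GetRole(rolename string) *Role
	GetAllRoles() []*Role
}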
-func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCost int) *authStore { +func NewAuthStore(lg *zap.Logger, be AuthBackend, tp TokenProvider, bcryptCost int) *authStore { if lg == nil { lg = zap.NewNop() } @@ -1043,23 +954,14 @@ func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCo bcryptCost = bcrypt.DefaultCost } + be.CreateAuthBuckets() tx := be.BatchTx() - tx.LockOutsideApply() - - tx.UnsafeCreateBucket(buckets.Auth) - tx.UnsafeCreateBucket(buckets.AuthUsers) - tx.UnsafeCreateBucket(buckets.AuthRoles) - - enabled := false - _, vs := tx.UnsafeRange(buckets.Auth, enableFlagKey, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - enabled = true - } - } - + // We should call LockOutsideApply here, but the txPostLockHooks aren't set + // on EtcdServer yet, so it's OK. + tx.Lock() + enabled := tx.UnsafeReadAuthEnabled() as := &authStore{ - revision: getRevision(tx), + revision: tx.UnsafeReadAuthRevision(), lg: lg, be: be, enabled: enabled, @@ -1092,20 +994,9 @@ func hasRootRole(u *authpb.User) bool { return idx != len(u.Roles) && u.Roles[idx] == rootRole } -func (as *authStore) commitRevision(tx backend.BatchTx) { +func (as *authStore) commitRevision(tx UnsafeAuthWriter) { atomic.AddUint64(&as.revision, 1) - revBytes := make([]byte, revBytesLen) - binary.BigEndian.PutUint64(revBytes, as.Revision()) - tx.UnsafePut(buckets.Auth, revisionKey, revBytes) -} - -func getRevision(tx backend.ReadTx) uint64 { - _, vs := tx.UnsafeRange(buckets.Auth, revisionKey, nil, 0) - if len(vs) != 1 { - // this can happen in the initialization phase - return 0 - } - return binary.BigEndian.Uint64(vs[0]) + tx.UnsafeSaveAuthRevision(as.Revision()) } func (as *authStore) setRevision(rev uint64) { @@ -1163,6 +1054,7 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { if !as.IsAuthEnabled() { return nil, nil } + md, ok := metadata.FromIncomingContext(ctx) if !ok { return nil, nil @@ -1221,7 +1113,6 @@ func decomposeOpts(lg *zap.Logger, optstr string) (string, map[string]string, er } return tokenType, typeSpecificOpts, nil - } // NewTokenProvider creates a new token provider. @@ -1302,8 +1193,8 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context { func (as *authStore) HasRole(user, role string) bool { tx := as.be.BatchTx() - tx.LockInsideApply() - u := getUser(as.lg, tx, user) + tx.Lock() + u := tx.UnsafeGetUser(user) tx.Unlock() if u == nil { diff --git a/vendor/go.etcd.io/etcd/server/v3/config/config.go b/vendor/go.etcd.io/etcd/server/v3/config/config.go index bf15b54560eed..32b7a26191bd3 100644 --- a/vendor/go.etcd.io/etcd/server/v3/config/config.go +++ b/vendor/go.etcd.io/etcd/server/v3/config/config.go @@ -22,24 +22,29 @@ import ( "strings" "time" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/netutil" - "go.etcd.io/etcd/server/v3/datadir" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.uber.org/zap" bolt "go.etcd.io/bbolt" - "go.uber.org/zap" + "go.etcd.io/etcd/client/pkg/v3/transport" + "go.etcd.io/etcd/client/pkg/v3/types" + "go.etcd.io/etcd/pkg/v3/featuregate" + "go.etcd.io/etcd/pkg/v3/netutil" + "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery" + "go.etcd.io/etcd/server/v3/storage/datadir" ) // ServerConfig holds the configuration of etcd as taken from the command line or discovery.
type ServerConfig struct { - Name string + Name string + DiscoveryURL string DiscoveryProxy string - ClientURLs types.URLs - PeerURLs types.URLs - DataDir string + DiscoveryCfg v3discovery.DiscoveryConfig + + ClientURLs types.URLs + PeerURLs types.URLs + DataDir string // DedicatedWALDir config will make the etcd to write the WAL to the WALDir // rather than the dataDir/member/wal. DedicatedWALDir string @@ -51,7 +56,6 @@ type ServerConfig struct { // We expect the follower has a millisecond level latency with the leader. // The max throughput is around 10K. Keep a 5K entries is enough for helping // follower to catch up. - // WARNING: only change this for tests. Always use "DefaultSnapshotCatchUpEntries" SnapshotCatchUpEntries uint64 MaxSnapFiles uint @@ -125,7 +129,8 @@ type ServerConfig struct { // streams that each client can open at a time. MaxConcurrentStreams uint32 - WarningApplyDuration time.Duration + WarningApplyDuration time.Duration + WarningUnaryRequestDuration time.Duration StrictReconfigCheck bool @@ -188,15 +193,21 @@ type ServerConfig struct { // a shared buffer in its readonly check operations. ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"` - // ExperimentalStopGRPCServiceOnDefrag enables etcd gRPC service to stop serving client requests on defragmentation. - ExperimentalStopGRPCServiceOnDefrag bool `json:"experimental-stop-grpc-service-on-defrag"` - // ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to // consider running defrag during bootstrap. Needs to be set to non-zero value to take effect. ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"` + // ExperimentalMaxLearners sets a limit to the number of learner members that can exist in the cluster membership. + ExperimentalMaxLearners int `json:"experimental-max-learners"` + // V2Deprecation defines a phase of v2store deprecation process. V2Deprecation V2DeprecationEnum `json:"v2-deprecation"` + + // ExperimentalLocalAddress is the local IP address to use when communicating with a peer. 
+ ExperimentalLocalAddress string `json:"experimental-local-address"` + + // ServerFeatureGate is a server level feature gate + ServerFeatureGate featuregate.FeatureGate } // VerifyBootstrap sanity-checks the initial config for bootstrap case @@ -262,7 +273,7 @@ func (c *ServerConfig) advertiseMatchesCluster() error { initMap[url.String()] = struct{}{} } - missing := []string{} + var missing []string for url := range initMap { if _, ok := apMap[url]; !ok { missing = append(missing, url) @@ -274,7 +285,7 @@ func (c *ServerConfig) advertiseMatchesCluster() error { } mstr := strings.Join(missing, ",") apStr := strings.Join(apurls, ",") - return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%v)", mstr, apStr, err) + return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%w)", mstr, apStr, err) } for url := range apMap { @@ -291,7 +302,7 @@ func (c *ServerConfig) advertiseMatchesCluster() error { // resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed apStr := strings.Join(apurls, ",") umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs}) - return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%v)", apStr, umap.String(), err) + return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%w)", apStr, umap.String(), err) } func (c *ServerConfig) MemberDir() string { return datadir.ToMemberDir(c.DataDir) } @@ -300,12 +311,14 @@ func (c *ServerConfig) WALDir() string { if c.DedicatedWALDir != "" { return c.DedicatedWALDir } - return datadir.ToWalDir(c.DataDir) + return datadir.ToWALDir(c.DataDir) } func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") } -func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" } +func (c *ServerConfig) ShouldDiscover() bool { + return c.DiscoveryURL != "" || len(c.DiscoveryCfg.Endpoints) > 0 +} // ReqTimeout returns timeout for request to finish. func (c *ServerConfig) ReqTimeout() time.Duration { diff --git a/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go b/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go index 828bd9a8f4300..c66fd0c05d681 100644 --- a/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go +++ b/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go @@ -17,18 +17,49 @@ package config type V2DeprecationEnum string const ( - // Default in v3.5. Issues a warning if v2store have meaningful content. - V2_DEPR_0_NOT_YET = V2DeprecationEnum("not-yet") + // No longer supported in v3.6 + V2Depr0NotYet = V2DeprecationEnum("not-yet") + // No longer supported in v3.6 + // + // Deprecated: Please use V2Depr0NotYet. + //revive:disable-next-line:var-naming + V2_DEPR_0_NOT_YET = V2Depr0NotYet // Default in v3.6. Meaningful v2 state is not allowed. // The V2 files are maintained for v3.5 rollback. - V2_DEPR_1_WRITE_ONLY = V2DeprecationEnum("write-only") + + V2Depr1WriteOnly = V2DeprecationEnum("write-only") + // Default in v3.6. Meaningful v2 state is not allowed. + // The V2 files are maintained for v3.5 rollback. + // + // Deprecated: Please use V2Depr1WriteOnly. + //revive:disable-next-line:var-naming + V2_DEPR_1_WRITE_ONLY = V2Depr1WriteOnly + + // V2store is WIPED if found !!! - V2_DEPR_1_WRITE_ONLY_DROP = V2DeprecationEnum("write-only-drop-data") + V2Depr1WriteOnlyDrop = V2DeprecationEnum("write-only-drop-data") + // V2store is WIPED if found !!! + // + // Deprecated: Please use V2Depr1WriteOnlyDrop.
+ //revive:disable-next-line:var-naming + V2_DEPR_1_WRITE_ONLY_DROP = V2Depr1WriteOnlyDrop + + // V2store is neither written nor read. Usage of this configuration is blocking + // ability to rollback to etcd v3.5. + V2Depr2Gone = V2DeprecationEnum("gone") // V2store is neither written nor read. Usage of this configuration is blocking // ability to rollback to etcd v3.5. - V2_DEPR_2_GONE = V2DeprecationEnum("gone") + // + // Deprecated: Please use V2Depr2Gone + //revive:disable-next-line:var-naming + V2_DEPR_2_GONE = V2Depr2Gone - V2_DEPR_DEFAULT = V2_DEPR_0_NOT_YET + // Default deprecation level. + V2DeprDefault = V2Depr1WriteOnly + // Default deprecation level. + // + // Deprecated: Please use V2DeprDefault. + //revive:disable-next-line:var-naming + V2_DEPR_DEFAULT = V2DeprDefault ) func (e V2DeprecationEnum) IsAtLeast(v2d V2DeprecationEnum) bool { @@ -37,13 +68,13 @@ func (e V2DeprecationEnum) IsAtLeast(v2d V2DeprecationEnum) bool { func (e V2DeprecationEnum) level() int { switch e { - case V2_DEPR_0_NOT_YET: + case V2Depr0NotYet: return 0 - case V2_DEPR_1_WRITE_ONLY: + case V2Depr1WriteOnly: return 1 - case V2_DEPR_1_WRITE_ONLY_DROP: + case V2Depr1WriteOnlyDrop: return 2 - case V2_DEPR_2_GONE: + case V2Depr2Gone: return 3 } panic("Unknown V2DeprecationEnum: " + e) diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config.go b/vendor/go.etcd.io/etcd/server/v3/embed/config.go index 66785265cd972..5c2fd38b97d57 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/config.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/config.go @@ -16,11 +16,13 @@ package embed import ( "crypto/tls" + "errors" + "flag" "fmt" - "io/ioutil" "math" "net" "net/http" + "net/netip" "net/url" "os" "path/filepath" @@ -28,40 +30,54 @@ import ( "sync" "time" + "go.uber.org/zap" + "golang.org/x/crypto/bcrypt" + "google.golang.org/grpc" + "sigs.k8s.io/yaml" + + bolt "go.etcd.io/bbolt" "go.etcd.io/etcd/client/pkg/v3/logutil" "go.etcd.io/etcd/client/pkg/v3/srv" "go.etcd.io/etcd/client/pkg/v3/tlsutil" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/types" + clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/pkg/v3/featuregate" "go.etcd.io/etcd/pkg/v3/flags" "go.etcd.io/etcd/pkg/v3/netutil" "go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" "go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor" - - bolt "go.etcd.io/bbolt" - "go.uber.org/multierr" - "go.uber.org/zap" - "golang.org/x/crypto/bcrypt" - "google.golang.org/grpc" - "sigs.k8s.io/yaml" + "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery" + "go.etcd.io/etcd/server/v3/features" ) const ( ClusterStateFlagNew = "new" ClusterStateFlagExisting = "existing" - DefaultName = "default" - DefaultMaxSnapshots = 5 - DefaultMaxWALs = 5 - DefaultMaxTxnOps = uint(128) - DefaultWarningApplyDuration = 100 * time.Millisecond - DefaultMaxRequestBytes = 1.5 * 1024 * 1024 - DefaultMaxConcurrentStreams = math.MaxUint32 - DefaultGRPCKeepAliveMinTime = 5 * time.Second - DefaultGRPCKeepAliveInterval = 2 * time.Hour - DefaultGRPCKeepAliveTimeout = 20 * time.Second - DefaultDowngradeCheckTime = 5 * time.Second + DefaultName = "default" + DefaultMaxSnapshots = 5 + DefaultMaxWALs = 5 + DefaultMaxTxnOps = uint(128) + DefaultWarningApplyDuration = 100 * time.Millisecond + DefaultWarningUnaryRequestDuration = 300 * time.Millisecond + DefaultMaxRequestBytes = 1.5 * 1024 * 1024 + DefaultMaxConcurrentStreams = math.MaxUint32 + 
DefaultGRPCKeepAliveMinTime = 5 * time.Second + DefaultGRPCKeepAliveInterval = 2 * time.Hour + DefaultGRPCKeepAliveTimeout = 20 * time.Second + DefaultDowngradeCheckTime = 5 * time.Second + DefaultAutoCompactionMode = "periodic" + DefaultAuthToken = "simple" + DefaultExperimentalCompactHashCheckTime = time.Minute + + DefaultDiscoveryDialTimeout = 2 * time.Second + DefaultDiscoveryRequestTimeOut = 5 * time.Second + DefaultDiscoveryKeepAliveTime = 2 * time.Second + DefaultDiscoveryKeepAliveTimeOut = 6 * time.Second DefaultListenPeerURLs = "http://localhost:2380" DefaultListenClientURLs = "http://localhost:2379" @@ -88,20 +104,19 @@ const ( // DefaultStrictReconfigCheck is the default value for "--strict-reconfig-check" flag. // It's enabled by default. DefaultStrictReconfigCheck = true - // DefaultEnableV2 is the default value for "--enable-v2" flag. - // v2 API is disabled by default. - DefaultEnableV2 = false // maxElectionMs specifies the maximum value of election timeout. - // More details are listed in ../Documentation/tuning.md#time-parameters. + // More details are listed on etcd.io/docs > version > tuning/#time-parameters maxElectionMs = 50000 // backend freelist map type freelistArrayType = "array" + + ServerFeatureGateFlagName = "feature-gates" ) var ( ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " + - "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"") + "Choose one of \"initial-cluster\", \"discovery\", \"discovery-endpoints\" or \"discovery-srv\"") ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly") ErrLogRotationInvalidLogOutput = fmt.Errorf("--log-outputs requires a single file path when --log-rotate-config-json is defined") @@ -138,8 +153,9 @@ func init() { // Config holds the arguments for configuring an etcd server. type Config struct { - Name string `json:"name"` - Dir string `json:"data-dir"` + Name string `json:"name"` + Dir string `json:"data-dir"` + //revive:disable-next-line:var-naming WalDir string `json:"wal-dir"` SnapshotCount uint64 `json:"snapshot-count"` @@ -149,12 +165,11 @@ type Config struct { // We expect the follower has a millisecond level latency with the leader. // The max throughput is around 10K. Keep a 5K entries is enough for helping // follower to catch up. - // WARNING: only change this for tests. - // Always use "DefaultSnapshotCatchUpEntries" SnapshotCatchUpEntries uint64 `json:"experimental-snapshot-catch-up-entries"` MaxSnapFiles uint `json:"max-snapshots"` - MaxWalFiles uint `json:"max-wals"` + //revive:disable-next-line:var-naming + MaxWalFiles uint `json:"max-wals"` // TickMs is the number of milliseconds between heartbeat ticks. // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1). @@ -205,12 +220,21 @@ type Config struct { // streams that each client can open at a time. 
MaxConcurrentStreams uint32 `json:"max-concurrent-streams"` + //revive:disable:var-naming ListenPeerUrls, ListenClientUrls, ListenClientHttpUrls []url.URL AdvertisePeerUrls, AdvertiseClientUrls []url.URL - ClientTLSInfo transport.TLSInfo - ClientAutoTLS bool - PeerTLSInfo transport.TLSInfo - PeerAutoTLS bool + //revive:enable:var-naming + + ClientTLSInfo transport.TLSInfo + ClientAutoTLS bool + PeerTLSInfo transport.TLSInfo + PeerAutoTLS bool + + // ExperimentalSetMemberLocalAddr enables using the first specified and + // non-loopback local address from initial-advertise-peer-urls as the local + // address when communicating with a peer. + ExperimentalSetMemberLocalAddr bool `json:"experimental-set-member-localaddr"` + // SelfSignedCertValidity specifies the validity period of the client and peer certificates // that are automatically generated by etcd when you specify ClientAutoTLS and PeerAutoTLS, // the unit is year, and the default is 1 @@ -222,23 +246,24 @@ type Config struct { CipherSuites []string `json:"cipher-suites"` // TlsMinVersion is the minimum accepted TLS version between client/server and peers. + //revive:disable-next-line:var-naming TlsMinVersion string `json:"tls-min-version"` + // TlsMaxVersion is the maximum accepted TLS version between client/server and peers. + //revive:disable-next-line:var-naming TlsMaxVersion string `json:"tls-max-version"` ClusterState string `json:"initial-cluster-state"` DNSCluster string `json:"discovery-srv"` DNSClusterServiceName string `json:"discovery-srv-name"` Dproxy string `json:"discovery-proxy"` - Durl string `json:"discovery"` - InitialCluster string `json:"initial-cluster"` - InitialClusterToken string `json:"initial-cluster-token"` - StrictReconfigCheck bool `json:"strict-reconfig-check"` - // EnableV2 exposes the deprecated V2 API surface. - // TODO: Delete in 3.6 (https://github.com/etcd-io/etcd/issues/12913) - // Deprecated in 3.5. - EnableV2 bool `json:"enable-v2"` + Durl string `json:"discovery"` + DiscoveryCfg v3discovery.DiscoveryConfig `json:"discovery-config"` + + InitialCluster string `json:"initial-cluster"` + InitialClusterToken string `json:"initial-cluster-token"` + StrictReconfigCheck bool `json:"strict-reconfig-check"` // AutoCompactionMode is either 'periodic' or 'revision'. AutoCompactionMode string `json:"auto-compaction-mode"` @@ -263,6 +288,13 @@ type Config struct { // before closing a non-responsive connection. 0 to disable. GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"` + // GRPCAdditionalServerOptions is the additional server option hook + // for changing the default internal gRPC configuration. Note these + // additional configurations take precedence over the existing individual + // configurations if present. Please refer to + // https://github.com/etcd-io/etcd/pull/14066#issuecomment-1248682996 + GRPCAdditionalServerOptions []grpc.ServerOption `json:"grpc-additional-server-options"` + // SocketOpts are socket options passed to listener config. SocketOpts transport.SocketOpts `json:"socket-options"` @@ -307,7 +339,7 @@ type Config struct { UserHandlers map[string]http.Handler `json:"-"` // ServiceRegister is for registering users' gRPC services. 
A simple usage example: // cfg := embed.NewConfig() - // cfg.ServerRegister = func(s *grpc.Server) { + // cfg.ServiceRegister = func(s *grpc.Server) { // pb.RegisterFooServer(s, &fooServer{}) // pb.RegisterBarServer(s, &barServer{}) // } @@ -317,17 +349,14 @@ type Config struct { AuthToken string `json:"auth-token"` BcryptCost uint `json:"bcrypt-cost"` - // AuthTokenTTL specifies the TTL in seconds of the simple token + // AuthTokenTTL in seconds of the simple token AuthTokenTTL uint `json:"auth-token-ttl"` ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"` ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"` ExperimentalCompactHashCheckEnabled bool `json:"experimental-compact-hash-check-enabled"` ExperimentalCompactHashCheckTime time.Duration `json:"experimental-compact-hash-check-time"` - // ExperimentalEnableV2V3 configures URLs that expose deprecated V2 API working on V3 store. - // Deprecated in v3.5. - // TODO: Delete in v3.6 (https://github.com/etcd-io/etcd/issues/12913) - ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"` + // ExperimentalEnableLeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change. ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"` // ExperimentalEnableLeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. @@ -345,6 +374,13 @@ type Config struct { // ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to // consider running defrag during bootstrap. Needs to be set to non-zero value to take effect. ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"` + // WarningUnaryRequestDuration is the time duration after which a warning is generated if applying + // unary request takes more time than this value. + WarningUnaryRequestDuration time.Duration `json:"warning-unary-request-duration"` + // ExperimentalWarningUnaryRequestDuration is deprecated, please use WarningUnaryRequestDuration instead. + ExperimentalWarningUnaryRequestDuration time.Duration `json:"experimental-warning-unary-request-duration"` + // ExperimentalMaxLearners sets a limit to the number of learner members that can exist in the cluster membership. + ExperimentalMaxLearners int `json:"experimental-max-learners"` // ForceNewCluster starts a new cluster even if previously started; unsafe. ForceNewCluster bool `json:"force-new-cluster"` @@ -376,6 +412,8 @@ type Config struct { Logger string `json:"logger"` // LogLevel configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'. LogLevel string `json:"log-level"` + // LogFormat set log encoding. Only supports json, console. Default is 'json'. 
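The ServiceRegister and GRPCAdditionalServerOptions hooks above are the two extension points an embedding program gets into etcd's gRPC server. A minimal sketch of using the options hook, assuming only the public embed API (embed.NewConfig, embed.StartEtcd) and standard grpc server options; the data directory value is arbitrary:

package main

import (
	"log"
	"math"

	"google.golang.org/grpc"

	"go.etcd.io/etcd/server/v3/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd"

	// Per the field comment above, options set here take precedence over the
	// corresponding individual configuration fields.
	cfg.GRPCAdditionalServerOptions = []grpc.ServerOption{
		grpc.MaxRecvMsgSize(math.MaxInt32),
	}

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()
	<-e.Server.ReadyNotify()
	log.Println("embedded etcd is ready")
}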
+ LogFormat string `json:"log-format"` // LogOutputs is either: // - "default" as os.Stderr, // - "stderr" as os.Stderr, @@ -421,6 +459,9 @@ type Config struct { // V2Deprecation describes phase of API & Storage V2 support V2Deprecation config.V2DeprecationEnum `json:"v2-deprecation"` + + // ServerFeatureGate is a server level feature gate + ServerFeatureGate featuregate.FeatureGate } // configYAML holds the config suitable for yaml parsing @@ -431,17 +472,19 @@ type configYAML struct { // configJSON has file options that are translated into Config options type configJSON struct { - ListenPeerUrls string `json:"listen-peer-urls"` - ListenClientUrls string `json:"listen-client-urls"` - ListenClientHttpUrls string `json:"listen-client-http-urls"` - AdvertisePeerUrls string `json:"initial-advertise-peer-urls"` - AdvertiseClientUrls string `json:"advertise-client-urls"` + ListenPeerURLs string `json:"listen-peer-urls"` + ListenClientURLs string `json:"listen-client-urls"` + ListenClientHTTPURLs string `json:"listen-client-http-urls"` + AdvertisePeerURLs string `json:"initial-advertise-peer-urls"` + AdvertiseClientURLs string `json:"advertise-client-urls"` CORSJSON string `json:"cors"` HostWhitelistJSON string `json:"host-whitelist"` ClientSecurityJSON securityConfig `json:"client-transport-security"` PeerSecurityJSON securityConfig `json:"peer-transport-security"` + + ServerFeatureGatesJSON string `json:"feature-gates"` } type securityConfig struct { @@ -499,12 +542,11 @@ func NewConfig() *Config { StrictReconfigCheck: DefaultStrictReconfigCheck, Metrics: "basic", - EnableV2: DefaultEnableV2, CORS: map[string]struct{}{"*": {}}, HostWhitelist: map[string]struct{}{"*": {}}, - AuthToken: "simple", + AuthToken: DefaultAuthToken, BcryptCost: uint(bcrypt.DefaultCost), AuthTokenTTL: 300, @@ -523,16 +565,218 @@ func NewConfig() *Config { ExperimentalMemoryMlock: false, ExperimentalTxnModeWriteWithSharedBuffer: true, ExperimentalStopGRPCServiceOnDefrag: false, + ExperimentalMaxLearners: membership.DefaultMaxLearners, ExperimentalCompactHashCheckEnabled: false, - ExperimentalCompactHashCheckTime: time.Minute, + ExperimentalCompactHashCheckTime: DefaultExperimentalCompactHashCheckTime, + + V2Deprecation: config.V2DeprDefault, + + DiscoveryCfg: v3discovery.DiscoveryConfig{ + ConfigSpec: clientv3.ConfigSpec{ + DialTimeout: DefaultDiscoveryDialTimeout, + RequestTimeout: DefaultDiscoveryRequestTimeOut, + KeepAliveTime: DefaultDiscoveryKeepAliveTime, + KeepAliveTimeout: DefaultDiscoveryKeepAliveTimeOut, + + Secure: &clientv3.SecureConfig{}, + Auth: &clientv3.AuthConfig{}, + }, + }, - V2Deprecation: config.V2_DEPR_DEFAULT, + AutoCompactionMode: DefaultAutoCompactionMode, + ServerFeatureGate: features.NewDefaultServerFeatureGate(DefaultName, nil), } cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) return cfg } +func (cfg *Config) AddFlags(fs *flag.FlagSet) { + // member + fs.StringVar(&cfg.Dir, "data-dir", cfg.Dir, "Path to the data directory.") + fs.StringVar(&cfg.WalDir, "wal-dir", cfg.WalDir, "Path to the dedicated wal directory.") + fs.Var( + flags.NewUniqueURLsWithExceptions(DefaultListenPeerURLs, ""), + "listen-peer-urls", + "List of URLs to listen on for peer traffic.", + ) + fs.Var( + flags.NewUniqueURLsWithExceptions(DefaultListenClientURLs, ""), "listen-client-urls", + "List of URLs to listen on for client grpc traffic and http as long as --listen-client-http-urls is not specified.", + ) + fs.Var( + flags.NewUniqueURLsWithExceptions("", ""), "listen-client-http-urls", + "List of URLs to 
listen on for http only client traffic. Enabling this flag removes http services from --listen-client-urls.", + ) + fs.Var( + flags.NewUniqueURLsWithExceptions("", ""), + "listen-metrics-urls", + "List of URLs to listen on for the metrics and health endpoints.", + ) + fs.UintVar(&cfg.MaxSnapFiles, "max-snapshots", cfg.MaxSnapFiles, "Maximum number of snapshot files to retain (0 is unlimited).") + fs.UintVar(&cfg.MaxWalFiles, "max-wals", cfg.MaxWalFiles, "Maximum number of wal files to retain (0 is unlimited).") + fs.StringVar(&cfg.Name, "name", cfg.Name, "Human-readable name for this member.") + fs.Uint64Var(&cfg.SnapshotCount, "snapshot-count", cfg.SnapshotCount, "Number of committed transactions to trigger a snapshot to disk.") + fs.UintVar(&cfg.TickMs, "heartbeat-interval", cfg.TickMs, "Time (in milliseconds) of a heartbeat interval.") + fs.UintVar(&cfg.ElectionMs, "election-timeout", cfg.ElectionMs, "Time (in milliseconds) for an election to timeout.") + fs.BoolVar(&cfg.InitialElectionTickAdvance, "initial-election-tick-advance", cfg.InitialElectionTickAdvance, "Whether to fast-forward initial election ticks on boot for faster election.") + fs.Int64Var(&cfg.QuotaBackendBytes, "quota-backend-bytes", cfg.QuotaBackendBytes, "Raise alarms when backend size exceeds the given quota. 0 means use the default quota.") + fs.StringVar(&cfg.BackendFreelistType, "backend-bbolt-freelist-type", cfg.BackendFreelistType, "BackendFreelistType specifies the type of freelist that boltdb backend uses(array and map are supported types)") + fs.DurationVar(&cfg.BackendBatchInterval, "backend-batch-interval", cfg.BackendBatchInterval, "BackendBatchInterval is the maximum time before commit the backend transaction.") + fs.IntVar(&cfg.BackendBatchLimit, "backend-batch-limit", cfg.BackendBatchLimit, "BackendBatchLimit is the maximum operations before commit the backend transaction.") + fs.UintVar(&cfg.MaxTxnOps, "max-txn-ops", cfg.MaxTxnOps, "Maximum number of operations permitted in a transaction.") + fs.UintVar(&cfg.MaxRequestBytes, "max-request-bytes", cfg.MaxRequestBytes, "Maximum client request size in bytes the server will accept.") + fs.DurationVar(&cfg.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.GRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging server.") + fs.DurationVar(&cfg.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).") + fs.DurationVar(&cfg.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).") + fs.BoolVar(&cfg.SocketOpts.ReusePort, "socket-reuse-port", cfg.SocketOpts.ReusePort, "Enable to set socket option SO_REUSEPORT on listeners allowing rebinding of a port already in use.") + fs.BoolVar(&cfg.SocketOpts.ReuseAddress, "socket-reuse-address", cfg.SocketOpts.ReuseAddress, "Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in `TIME_WAIT` state.") + + fs.Var(flags.NewUint32Value(cfg.MaxConcurrentStreams), "max-concurrent-streams", "Maximum concurrent streams that each client can open at a time.") + + // raft connection timeouts + fs.DurationVar(&rafthttp.ConnReadTimeout, "raft-read-timeout", rafthttp.DefaultConnReadTimeout, "Read timeout set on each rafthttp connection") + fs.DurationVar(&rafthttp.ConnWriteTimeout, "raft-write-timeout", rafthttp.DefaultConnWriteTimeout, "Write timeout set 
on each rafthttp connection") + + // clustering + fs.Var( + flags.NewUniqueURLsWithExceptions(DefaultInitialAdvertisePeerURLs, ""), + "initial-advertise-peer-urls", + "List of this member's peer URLs to advertise to the rest of the cluster.", + ) + fs.BoolVar(&cfg.ExperimentalSetMemberLocalAddr, "experimental-set-member-localaddr", false, "Enable to have etcd use the first specified and non-loopback host from initial-advertise-peer-urls as the local address when communicating with a peer.") + + fs.Var( + flags.NewUniqueURLsWithExceptions(DefaultAdvertiseClientURLs, ""), + "advertise-client-urls", + "List of this member's client URLs to advertise to the public.", + ) + + fs.StringVar(&cfg.Durl, "discovery", cfg.Durl, "Discovery URL used to bootstrap the cluster for v2 discovery. Will be deprecated in v3.7, and be decommissioned in v3.8.") + + fs.Var( + flags.NewUniqueStringsValue(""), + "discovery-endpoints", + "V3 discovery: List of gRPC endpoints of the discovery service.", + ) + fs.StringVar(&cfg.DiscoveryCfg.Token, "discovery-token", "", "V3 discovery: discovery token for the etcd cluster to be bootstrapped.") + fs.DurationVar(&cfg.DiscoveryCfg.DialTimeout, "discovery-dial-timeout", cfg.DiscoveryCfg.DialTimeout, "V3 discovery: dial timeout for client connections.") + fs.DurationVar(&cfg.DiscoveryCfg.RequestTimeout, "discovery-request-timeout", cfg.DiscoveryCfg.RequestTimeout, "V3 discovery: timeout for discovery requests (excluding dial timeout).") + fs.DurationVar(&cfg.DiscoveryCfg.KeepAliveTime, "discovery-keepalive-time", cfg.DiscoveryCfg.KeepAliveTime, "V3 discovery: keepalive time for client connections.") + fs.DurationVar(&cfg.DiscoveryCfg.KeepAliveTimeout, "discovery-keepalive-timeout", cfg.DiscoveryCfg.KeepAliveTimeout, "V3 discovery: keepalive timeout for client connections.") + fs.BoolVar(&cfg.DiscoveryCfg.Secure.InsecureTransport, "discovery-insecure-transport", true, "V3 discovery: disable transport security for client connections.") + fs.BoolVar(&cfg.DiscoveryCfg.Secure.InsecureSkipVerify, "discovery-insecure-skip-tls-verify", false, "V3 discovery: skip server certificate verification (CAUTION: this option should be enabled only for testing purposes).") + fs.StringVar(&cfg.DiscoveryCfg.Secure.Cert, "discovery-cert", "", "V3 discovery: identify secure client using this TLS certificate file.") + fs.StringVar(&cfg.DiscoveryCfg.Secure.Key, "discovery-key", "", "V3 discovery: identify secure client using this TLS key file.") + fs.StringVar(&cfg.DiscoveryCfg.Secure.Cacert, "discovery-cacert", "", "V3 discovery: verify certificates of TLS-enabled secure servers using this CA bundle.") + fs.StringVar(&cfg.DiscoveryCfg.Auth.Username, "discovery-user", "", "V3 discovery: username[:password] for authentication (prompt if password is not supplied).") + fs.StringVar(&cfg.DiscoveryCfg.Auth.Password, "discovery-password", "", "V3 discovery: password for authentication (if this option is used, --user option shouldn't include password).") + + fs.StringVar(&cfg.Dproxy, "discovery-proxy", cfg.Dproxy, "HTTP proxy to use for traffic to discovery service. 
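The v3 discovery flags registered above map directly onto cfg.DiscoveryCfg, so an embedding program can set them without going through a flag set. A minimal sketch with hypothetical token and endpoint values; Validate (later in this diff) enforces that discovery-token and discovery-endpoints are set together and that v2 and v3 discovery settings are not mixed:

package main

import (
	"log"
	"time"

	"go.etcd.io/etcd/server/v3/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Name = "infra1"

	// Discovery and --initial-cluster are mutually exclusive, so clear the
	// default initial cluster that NewConfig derives from the member name.
	cfg.InitialCluster = ""

	cfg.DiscoveryCfg.Token = "bootstrap-token-1"                  // hypothetical token
	cfg.DiscoveryCfg.Endpoints = []string{"http://10.0.0.5:2379"} // hypothetical endpoint
	cfg.DiscoveryCfg.DialTimeout = 2 * time.Second

	if err := cfg.Validate(); err != nil {
		log.Fatal(err)
	}
}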
Will be deprecated in v3.7, and be decommissioned in v3.8.") + fs.StringVar(&cfg.DNSCluster, "discovery-srv", cfg.DNSCluster, "DNS domain used to bootstrap initial cluster.") + fs.StringVar(&cfg.DNSClusterServiceName, "discovery-srv-name", cfg.DNSClusterServiceName, "Service name to query when using DNS discovery.") + fs.StringVar(&cfg.InitialCluster, "initial-cluster", cfg.InitialCluster, "Initial cluster configuration for bootstrapping.") + fs.StringVar(&cfg.InitialClusterToken, "initial-cluster-token", cfg.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.") + fs.BoolVar(&cfg.StrictReconfigCheck, "strict-reconfig-check", cfg.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.") + + fs.BoolVar(&cfg.PreVote, "pre-vote", cfg.PreVote, "Enable the raft Pre-Vote algorithm to prevent disruption when a node that has been partitioned away rejoins the cluster.") + + // security + fs.StringVar(&cfg.ClientTLSInfo.CertFile, "cert-file", "", "Path to the client server TLS cert file.") + fs.StringVar(&cfg.ClientTLSInfo.KeyFile, "key-file", "", "Path to the client server TLS key file.") + fs.StringVar(&cfg.ClientTLSInfo.ClientCertFile, "client-cert-file", "", "Path to an explicit peer client TLS cert file otherwise cert file will be used when client auth is required.") + fs.StringVar(&cfg.ClientTLSInfo.ClientKeyFile, "client-key-file", "", "Path to an explicit peer client TLS key file otherwise key file will be used when client auth is required.") + fs.BoolVar(&cfg.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "Enable client cert authentication.") + fs.StringVar(&cfg.ClientTLSInfo.CRLFile, "client-crl-file", "", "Path to the client certificate revocation list file.") + fs.Var(flags.NewStringsValue(""), "client-cert-allowed-hostname", "Comma-separated list of allowed SAN hostnames for client cert authentication.") + fs.StringVar(&cfg.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "Path to the client server TLS trusted CA cert file.") + fs.BoolVar(&cfg.ClientAutoTLS, "auto-tls", false, "Client TLS using generated certificates") + fs.StringVar(&cfg.PeerTLSInfo.CertFile, "peer-cert-file", "", "Path to the peer server TLS cert file.") + fs.StringVar(&cfg.PeerTLSInfo.KeyFile, "peer-key-file", "", "Path to the peer server TLS key file.") + fs.StringVar(&cfg.PeerTLSInfo.ClientCertFile, "peer-client-cert-file", "", "Path to an explicit peer client TLS cert file otherwise peer cert file will be used when client auth is required.") + fs.StringVar(&cfg.PeerTLSInfo.ClientKeyFile, "peer-client-key-file", "", "Path to an explicit peer client TLS key file otherwise peer key file will be used when client auth is required.") + fs.BoolVar(&cfg.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "Enable peer client cert authentication.") + fs.StringVar(&cfg.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "Path to the peer server TLS trusted CA file.") + fs.BoolVar(&cfg.PeerAutoTLS, "peer-auto-tls", false, "Peer TLS using generated certificates") + fs.UintVar(&cfg.SelfSignedCertValidity, "self-signed-cert-validity", 1, "The validity period of the client and peer certificates, unit is year") + fs.StringVar(&cfg.PeerTLSInfo.CRLFile, "peer-crl-file", "", "Path to the peer certificate revocation list file.") + fs.Var(flags.NewStringsValue(""), "peer-cert-allowed-cn", "Comma-separated list of allowed CNs for inter-peer TLS authentication.") + fs.Var(flags.NewStringsValue(""), "peer-cert-allowed-hostname", "Comma-separated list of 
allowed SAN hostnames for inter-peer TLS authentication.") + fs.Var(flags.NewStringsValue(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).") + fs.BoolVar(&cfg.PeerTLSInfo.SkipClientSANVerify, "experimental-peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections.") + fs.StringVar(&cfg.TlsMinVersion, "tls-min-version", string(tlsutil.TLSVersion12), "Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3.") + fs.StringVar(&cfg.TlsMaxVersion, "tls-max-version", string(tlsutil.TLSVersionDefault), "Maximum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3 (empty defers to Go).") + + fs.Var( + flags.NewUniqueURLsWithExceptions("*", "*"), + "cors", + "Comma-separated white list of origins for CORS, or cross-origin resource sharing, (empty or * means allow all)", + ) + fs.Var(flags.NewUniqueStringsValue("*"), "host-whitelist", "Comma-separated acceptable hostnames from HTTP client requests, if server is not secure (empty means allow all).") + + // logging + fs.StringVar(&cfg.Logger, "logger", "zap", "Currently only supports 'zap' for structured logging.") + fs.Var(flags.NewUniqueStringsValue(DefaultLogOutput), "log-outputs", "Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd, or list of comma separated output targets.") + fs.StringVar(&cfg.LogLevel, "log-level", logutil.DefaultLogLevel, "Configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.") + fs.StringVar(&cfg.LogFormat, "log-format", logutil.DefaultLogFormat, "Configures log format. Only supports json, console. Default is 'json'.") + fs.BoolVar(&cfg.EnableLogRotation, "enable-log-rotation", false, "Enable log rotation of a single log-outputs file target.") + fs.StringVar(&cfg.LogRotationConfigJSON, "log-rotation-config-json", DefaultLogRotationConfig, "Configures log rotation if enabled with a JSON logger config. Default: MaxSize=100(MB), MaxAge=0(days,no limit), MaxBackups=0(no limit), LocalTime=false(UTC), Compress=false(gzip)") + + fs.StringVar(&cfg.AutoCompactionRetention, "auto-compaction-retention", "0", "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.") + fs.StringVar(&cfg.AutoCompactionMode, "auto-compaction-mode", "periodic", "interpret 'auto-compaction-retention' one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.") + + // pprof profiler via HTTP + fs.BoolVar(&cfg.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. 
Address is at client URL + \"/debug/pprof/\"") + + // additional metrics + fs.StringVar(&cfg.Metrics, "metrics", cfg.Metrics, "Set level of detail for exported metrics, specify 'extensive' to include server side grpc histogram metrics") + + // experimental distributed tracing + fs.BoolVar(&cfg.ExperimentalEnableDistributedTracing, "experimental-enable-distributed-tracing", false, "Enable experimental distributed tracing using OpenTelemetry Tracing.") + fs.StringVar(&cfg.ExperimentalDistributedTracingAddress, "experimental-distributed-tracing-address", ExperimentalDistributedTracingAddress, "Address for distributed tracing used for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag).") + fs.StringVar(&cfg.ExperimentalDistributedTracingServiceName, "experimental-distributed-tracing-service-name", ExperimentalDistributedTracingServiceName, "Configures service name for distributed tracing to be used to define service name for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). 'etcd' is the default service name. Use the same service name for all instances of etcd.") + fs.StringVar(&cfg.ExperimentalDistributedTracingServiceInstanceID, "experimental-distributed-tracing-instance-id", "", "Configures service instance ID for distributed tracing to be used to define service instance ID key for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). There is no default value set. This ID must be unique per etcd instance.") + fs.IntVar(&cfg.ExperimentalDistributedTracingSamplingRatePerMillion, "experimental-distributed-tracing-sampling-rate", 0, "Number of samples to collect per million spans for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag).") + + // auth + fs.StringVar(&cfg.AuthToken, "auth-token", cfg.AuthToken, "Specify auth token specific options.") + fs.UintVar(&cfg.BcryptCost, "bcrypt-cost", cfg.BcryptCost, "Specify bcrypt algorithm cost factor for auth password hashing.") + fs.UintVar(&cfg.AuthTokenTTL, "auth-token-ttl", cfg.AuthTokenTTL, "The lifetime in seconds of the auth token.") + + // gateway + fs.BoolVar(&cfg.EnableGRPCGateway, "enable-grpc-gateway", cfg.EnableGRPCGateway, "Enable GRPC gateway.") + + // experimental + fs.BoolVar(&cfg.ExperimentalInitialCorruptCheck, "experimental-initial-corrupt-check", cfg.ExperimentalInitialCorruptCheck, "Enable to check data corruption before serving any client/peer traffic.") + fs.DurationVar(&cfg.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.") + fs.BoolVar(&cfg.ExperimentalCompactHashCheckEnabled, "experimental-compact-hash-check-enabled", cfg.ExperimentalCompactHashCheckEnabled, "Enable leader to periodically check followers compaction hashes.") + fs.DurationVar(&cfg.ExperimentalCompactHashCheckTime, "experimental-compact-hash-check-time", cfg.ExperimentalCompactHashCheckTime, "Duration of time between leader checks followers compaction hashes.") + + fs.BoolVar(&cfg.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.") + // TODO: delete in v3.7 + fs.BoolVar(&cfg.ExperimentalEnableLeaseCheckpointPersist, "experimental-enable-lease-checkpoint-persist", false, "Enable persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. 
Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. Requires experimental-enable-lease-checkpoint to be enabled.") + fs.IntVar(&cfg.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.") + fs.DurationVar(&cfg.ExperimentalCompactionSleepInterval, "experimental-compaction-sleep-interval", cfg.ExperimentalCompactionSleepInterval, "Sets the sleep interval between each compaction batch.") + fs.DurationVar(&cfg.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.") + fs.DurationVar(&cfg.ExperimentalDowngradeCheckTime, "experimental-downgrade-check-time", cfg.ExperimentalDowngradeCheckTime, "Duration of time between two downgrade status checks.") + fs.DurationVar(&cfg.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ExperimentalWarningApplyDuration, "Time duration after which a warning is generated if request takes more time.") + fs.DurationVar(&cfg.WarningUnaryRequestDuration, "warning-unary-request-duration", cfg.WarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time.") + fs.DurationVar(&cfg.ExperimentalWarningUnaryRequestDuration, "experimental-warning-unary-request-duration", cfg.ExperimentalWarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time. It's deprecated, and will be decommissioned in v3.7. Use --warning-unary-request-duration instead.") + fs.BoolVar(&cfg.ExperimentalMemoryMlock, "experimental-memory-mlock", cfg.ExperimentalMemoryMlock, "Enable to enforce etcd pages (in particular bbolt) to stay in RAM.") + fs.BoolVar(&cfg.ExperimentalTxnModeWriteWithSharedBuffer, "experimental-txn-mode-write-with-shared-buffer", true, "Enable the write transaction to use a shared buffer in its readonly check operations.") + fs.BoolVar(&cfg.ExperimentalStopGRPCServiceOnDefrag, "experimental-stop-grpc-service-on-defrag", cfg.ExperimentalStopGRPCServiceOnDefrag, "Enable etcd gRPC service to stop serving client requests on defragmentation.") + fs.UintVar(&cfg.ExperimentalBootstrapDefragThresholdMegabytes, "experimental-bootstrap-defrag-threshold-megabytes", 0, "Enable the defrag during etcd server bootstrap on condition that it will free at least the provided threshold of disk space. 
Needs to be set to non-zero value to take effect.") + fs.IntVar(&cfg.ExperimentalMaxLearners, "experimental-max-learners", membership.DefaultMaxLearners, "Sets the maximum number of learners that can be available in the cluster membership.") + fs.Uint64Var(&cfg.SnapshotCatchUpEntries, "experimental-snapshot-catchup-entries", cfg.SnapshotCatchUpEntries, "Number of entries for a slow follower to catch up after compacting the raft storage entries.") + + // unsafe + fs.BoolVar(&cfg.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.") + fs.BoolVar(&cfg.ForceNewCluster, "force-new-cluster", false, "Force to create a new one member cluster.") + + // featuregate + cfg.ServerFeatureGate.(featuregate.MutableFeatureGate).AddFlag(fs, ServerFeatureGateFlagName) +} + func ConfigFromFile(path string) (*Config, error) { cfg := &configYAML{Config: *NewConfig()} if err := cfg.configFromFile(path); err != nil { @@ -542,7 +786,7 @@ func ConfigFromFile(path string) (*Config, error) { } func (cfg *configYAML) configFromFile(path string) error { - b, err := ioutil.ReadFile(path) + b, err := os.ReadFile(path) if err != nil { return err } @@ -554,8 +798,34 @@ func (cfg *configYAML) configFromFile(path string) error { return err } - if cfg.configJSON.ListenPeerUrls != "" { - u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenPeerUrls, ",")) + if cfg.configJSON.ServerFeatureGatesJSON != "" { + err = cfg.Config.ServerFeatureGate.(featuregate.MutableFeatureGate).Set(cfg.configJSON.ServerFeatureGatesJSON) + if err != nil { + return err + } + } + + // parses the yaml bytes to raw map first, then getBoolFlagVal can get the top level bool flag value. + var cfgMap map[string]interface{} + err = yaml.Unmarshal(b, &cfgMap) + if err != nil { + return err + } + getBoolFlagVal := func(flagName string) *bool { + flagVal, ok := cfgMap[flagName] + if !ok { + return nil + } + boolVal := flagVal.(bool) + return &boolVal + } + err = SetFeatureGatesFromExperimentalFlags(cfg.ServerFeatureGate, getBoolFlagVal, cfg.configJSON.ServerFeatureGatesJSON) + if err != nil { + return err + } + + if cfg.configJSON.ListenPeerURLs != "" { + u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenPeerURLs, ",")) if err != nil { fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err) os.Exit(1) @@ -563,8 +833,8 @@ func (cfg *configYAML) configFromFile(path string) error { cfg.Config.ListenPeerUrls = u } - if cfg.configJSON.ListenClientUrls != "" { - u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientUrls, ",")) + if cfg.configJSON.ListenClientURLs != "" { + u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientURLs, ",")) if err != nil { fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err) os.Exit(1) @@ -572,8 +842,8 @@ func (cfg *configYAML) configFromFile(path string) error { cfg.Config.ListenClientUrls = u } - if cfg.configJSON.ListenClientHttpUrls != "" { - u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientHttpUrls, ",")) + if cfg.configJSON.ListenClientHTTPURLs != "" { + u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientHTTPURLs, ",")) if err != nil { fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-http-urls: %v\n", err) os.Exit(1) @@ -581,8 +851,8 @@ func (cfg *configYAML) configFromFile(path string) error { cfg.Config.ListenClientHttpUrls = u } - if cfg.configJSON.AdvertisePeerUrls != "" { - u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertisePeerUrls, ",")) 
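AddFlags, defined above, is new public API: it registers embed.Config's entire option surface, including the feature gate, on a caller-supplied FlagSet. A minimal sketch of an embedding binary reusing etcd's CLI surface this way:

package main

import (
	"flag"
	"fmt"
	"os"

	"go.etcd.io/etcd/server/v3/embed"
)

func main() {
	cfg := embed.NewConfig()

	fs := flag.NewFlagSet("embedded-etcd", flag.ContinueOnError)
	cfg.AddFlags(fs) // registers --name, --data-dir, --feature-gates, ...

	if err := fs.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("member name:", cfg.Name) // flags bound via *Var write straight into cfg
}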
+ if cfg.configJSON.AdvertisePeerURLs != "" { + u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertisePeerURLs, ",")) if err != nil { fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err) os.Exit(1) @@ -590,8 +860,8 @@ func (cfg *configYAML) configFromFile(path string) error { cfg.Config.AdvertisePeerUrls = u } - if cfg.configJSON.AdvertiseClientUrls != "" { - u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertiseClientUrls, ",")) + if cfg.configJSON.AdvertiseClientURLs != "" { + u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertiseClientURLs, ",")) if err != nil { fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err) os.Exit(1) @@ -605,7 +875,7 @@ func (cfg *configYAML) configFromFile(path string) error { fmt.Fprintf(os.Stderr, "unexpected error setting up listen-metrics-urls: %v\n", err) os.Exit(1) } - cfg.ListenMetricsUrls = []url.URL(u) + cfg.ListenMetricsUrls = u } if cfg.CORSJSON != "" { @@ -618,8 +888,8 @@ func (cfg *configYAML) configFromFile(path string) error { cfg.HostWhitelist = uv.Values } - // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName - if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster { + // If a discovery or discovery-endpoints flag is set, clear default initial cluster set by InitialClusterFromName + if (cfg.Durl != "" || cfg.DNSCluster != "" || len(cfg.DiscoveryCfg.Endpoints) > 0) && cfg.InitialCluster == defaultInitialCluster { cfg.InitialCluster = "" } if cfg.ClusterState == "" { @@ -646,6 +916,36 @@ func (cfg *configYAML) configFromFile(path string) error { return cfg.Validate() } +// SetFeatureGatesFromExperimentalFlags sets the feature gate values if the feature gate is not explicitly set +// while their corresponding experimental flags are explicitly set, for all the features in ExperimentalFlagToFeatureMap. +// TODO: remove after all experimental flags are deprecated. +func SetFeatureGatesFromExperimentalFlags(fg featuregate.FeatureGate, getExperimentalFlagVal func(string) *bool, featureGatesVal string) error { + m := make(map[featuregate.Feature]bool) + // verify that the feature gate and its experimental flag are not both set at the same time. + for expFlagName, featureName := range features.ExperimentalFlagToFeatureMap { + flagVal := getExperimentalFlagVal(expFlagName) + if flagVal == nil { + continue + } + if strings.Contains(featureGatesVal, string(featureName)) { + return fmt.Errorf("cannot specify both flags: --%s=%v and --%s=%s=%v at the same time, please just use --%s=%s=%v", + expFlagName, *flagVal, ServerFeatureGateFlagName, featureName, fg.Enabled(featureName), ServerFeatureGateFlagName, featureName, fg.Enabled(featureName)) + } + m[featureName] = *flagVal + } + + // filter out unknown features for fg, because we could use SetFeatureGatesFromExperimentalFlags both for + // server and cluster level feature gates. 
+ allFeatures := fg.(featuregate.MutableFeatureGate).GetAll() + mFiltered := make(map[string]bool) + for k, v := range m { + if _, ok := allFeatures[k]; ok { + mFiltered[string(k)] = v + } + } + return fg.(featuregate.MutableFeatureGate).SetFromMap(mFiltered) +} + func updateCipherSuites(tls *transport.TLSInfo, ss []string) error { if len(tls.CipherSuites) > 0 && len(ss) > 0 { return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss) @@ -692,16 +992,16 @@ func (cfg *Config) Validate() error { return err } if err := checkHostURLs(cfg.AdvertisePeerUrls); err != nil { - addrs := cfg.getAdvertisePeerUrls() - return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err) + addrs := cfg.getAdvertisePeerURLs() + return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%w)`, strings.Join(addrs, ","), err) } if err := checkHostURLs(cfg.AdvertiseClientUrls); err != nil { - addrs := cfg.getAdvertiseClientUrls() - return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err) + addrs := cfg.getAdvertiseClientURLs() + return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%w)`, strings.Join(addrs, ","), err) } // Check if conflicting flags are passed. nSet := 0 - for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} { + for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != "", len(cfg.DiscoveryCfg.Endpoints) > 0} { if v { nSet++ } @@ -715,6 +1015,28 @@ func (cfg *Config) Validate() error { return ErrConflictBootstrapFlags } + // Check if both v2 discovery and v3 discovery flags are passed. + v2discoveryFlagsExist := cfg.Dproxy != "" + v3discoveryFlagsExist := len(cfg.DiscoveryCfg.Endpoints) > 0 || + cfg.DiscoveryCfg.Token != "" || + cfg.DiscoveryCfg.Secure.Cert != "" || + cfg.DiscoveryCfg.Secure.Key != "" || + cfg.DiscoveryCfg.Secure.Cacert != "" || + cfg.DiscoveryCfg.Auth.Username != "" || + cfg.DiscoveryCfg.Auth.Password != "" + + if v2discoveryFlagsExist && v3discoveryFlagsExist { + return errors.New("both v2 discovery settings (discovery, discovery-proxy) " + "and v3 discovery settings (discovery-token, discovery-endpoints, discovery-cert, " + "discovery-key, discovery-cacert, discovery-user, discovery-password) are set") + } + + // If one of `discovery-token` and `discovery-endpoints` is provided, + // then the other one must be provided as well. + if (cfg.DiscoveryCfg.Token != "") != (len(cfg.DiscoveryCfg.Endpoints) > 0) { + return errors.New("both --discovery-token and --discovery-endpoints must be set") + } + if cfg.TickMs == 0 { return fmt.Errorf("--heartbeat-interval must be >0 (set to %dms)", cfg.TickMs) } @@ -734,12 +1056,20 @@ func (cfg *Config) Validate() error { } switch cfg.AutoCompactionMode { - case "": case CompactorModeRevision, CompactorModePeriodic: + case "": + return errors.New("undefined auto-compaction-mode") default: return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode) } + // Validate distributed tracing configuration but only if enabled. + if cfg.ExperimentalEnableDistributedTracing { + if err := validateTracingConfig(cfg.ExperimentalDistributedTracingSamplingRatePerMillion); err != nil { + return fmt.Errorf("distributed tracing configuration is not valid: (%w)", err) + } + } + if !cfg.ExperimentalEnableLeaseCheckpointPersist && cfg.ExperimentalEnableLeaseCheckpoint { cfg.logger.Warn("Detected that checkpointing is enabled without persistence.
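SetFeatureGatesFromExperimentalFlags above bridges the deprecated --experimental-* booleans to the new feature gates: an experimental flag is honored only when its gate was not set explicitly, and setting both is an error. A sketch of the call; the flag name used here is assumed to be an entry of features.ExperimentalFlagToFeatureMap:

package main

import (
	"log"

	"go.etcd.io/etcd/server/v3/embed"
)

func main() {
	cfg := embed.NewConfig()

	// Report the experimental flag as explicitly set to true; return nil for
	// flags the user did not touch.
	getFlagVal := func(name string) *bool {
		if name == "experimental-stop-grpc-service-on-defrag" { // assumed map entry
			v := true
			return &v
		}
		return nil
	}

	// An empty featureGatesVal means --feature-gates was not used, so the
	// experimental value is folded into the gate.
	if err := embed.SetFeatureGatesFromExperimentalFlags(cfg.ServerFeatureGate, getFlagVal, ""); err != nil {
		log.Fatal(err)
	}
}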
Consider enabling experimental-enable-lease-checkpoint-persist") } @@ -752,6 +1082,18 @@ func (cfg *Config) Validate() error { return fmt.Errorf("--experimental-compact-hash-check-time must be >0 (set to %v)", cfg.ExperimentalCompactHashCheckTime) } + // If `--name` isn't configured, then multiple members may have the same "default" name. + // When adding a new member with the "default" name as well, etcd may regard its peerURL + // as one additional peerURL of the existing member which has the same "default" name, + // because each member can have multiple client or peer URLs. + // Please refer to https://github.com/etcd-io/etcd/issues/13757 + if cfg.Name == DefaultName { + cfg.logger.Warn( + "it isn't recommended to use the default name; please set a value for --name. "+ + "Note that etcd might run into issues when multiple members have the same default name", + zap.String("name", cfg.Name)) + } + minVersion, err := tlsutil.GetTLSVersion(cfg.TlsMinVersion) if err != nil { return err } @@ -771,13 +1113,6 @@ func (cfg *Config) Validate() error { return fmt.Errorf("cipher suites cannot be configured when only TLS1.3 is enabled") } - // Validate distributed tracing configuration but only if enabled. - if cfg.ExperimentalEnableDistributedTracing { - if err := validateTracingConfig(cfg.ExperimentalDistributedTracingSamplingRatePerMillion); err != nil { - return fmt.Errorf("distributed tracing configurition is not valid: (%v)", err) - } - } - return nil } @@ -787,11 +1122,18 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok switch { case cfg.Durl != "": urlsmap = types.URLsMap{} - // If using discovery, generate a temporary cluster based on + // If using v2 discovery, generate a temporary cluster based on // self's advertised peer URLs urlsmap[cfg.Name] = cfg.AdvertisePeerUrls token = cfg.Durl + case len(cfg.DiscoveryCfg.Endpoints) > 0: + urlsmap = types.URLsMap{} + // If using v3 discovery, generate a temporary cluster based on + // self's advertised peer URLs + urlsmap[cfg.Name] = cfg.AdvertisePeerUrls + token = cfg.DiscoveryCfg.Token + case cfg.DNSCluster != "": clusterStrs, cerr := cfg.GetDNSClusterNames() lg := cfg.logger @@ -853,7 +1195,7 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) { zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix), zap.String("server-name", cfg.Name), zap.String("discovery-srv", cfg.DNSCluster), - zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerUrls()), + zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()), zap.Strings("found-cluster", clusterStrs), zap.Error(cerr), ) @@ -868,15 +1210,15 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) { zap.String("service-name", "etcd-server"+serviceNameSuffix), zap.String("server-name", cfg.Name), zap.String("discovery-srv", cfg.DNSCluster), - zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerUrls()), + zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()), zap.Strings("found-cluster", clusterStrs), zap.Error(httpCerr), ) - return clusterStrs, multierr.Combine(cerr, httpCerr) + return clusterStrs, errors.Join(cerr, httpCerr) } -func (cfg Config) InitialClusterFromName(name string) (ret string) { +func (cfg *Config) InitialClusterFromName(name string) (ret string) { if len(cfg.AdvertisePeerUrls) == 0 { return "" } @@ -890,21 +1232,55 @@ func (cfg Config) InitialClusterFromName(name string) (ret string) { return ret[1:] } -func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew } -func (cfg Config)
ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) } +// InferLocalAddr tries to determine the LocalAddr used when communicating with +// an etcd peer. If SetMemberLocalAddr is true, then it will try to get the host +// from AdvertisePeerUrls by searching for the first URL with a specified +// non-loopback address. Otherwise, it defaults to empty string and the +// LocalAddr used will be the default for the Golang HTTP client. +func (cfg *Config) InferLocalAddr() string { + if !cfg.ExperimentalSetMemberLocalAddr { + return "" + } -func (cfg Config) V2DeprecationEffective() config.V2DeprecationEnum { + lg := cfg.GetLogger() + lg.Info( + "searching for a suitable member local address in AdvertisePeerURLs", + zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()), + ) + for _, peerURL := range cfg.AdvertisePeerUrls { + if addr, err := netip.ParseAddr(peerURL.Hostname()); err == nil { + if addr.IsLoopback() || addr.IsUnspecified() { + continue + } + lg.Info( + "setting member local address", + zap.String("LocalAddr", addr.String()), + ) + return addr.String() + } + } + lg.Warn( + "unable to set a member local address due to lack of suitable local addresses", + zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()), + ) + return "" +} + +func (cfg *Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew } +func (cfg *Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) } + +func (cfg *Config) V2DeprecationEffective() config.V2DeprecationEnum { if cfg.V2Deprecation == "" { - return config.V2_DEPR_DEFAULT + return config.V2DeprDefault } return cfg.V2Deprecation } -func (cfg Config) defaultPeerHost() bool { +func (cfg *Config) defaultPeerHost() bool { return len(cfg.AdvertisePeerUrls) == 1 && cfg.AdvertisePeerUrls[0].String() == DefaultInitialAdvertisePeerURLs } -func (cfg Config) defaultClientHost() bool { +func (cfg *Config) defaultClientHost() bool { return len(cfg.AdvertiseClientUrls) == 1 && cfg.AdvertiseClientUrls[0].String() == DefaultAdvertiseClientURLs } @@ -1024,7 +1400,7 @@ func checkHostURLs(urls []url.URL) error { return nil } -func (cfg *Config) getAdvertisePeerUrls() (ss []string) { +func (cfg *Config) getAdvertisePeerURLs() (ss []string) { ss = make([]string, len(cfg.AdvertisePeerUrls)) for i := range cfg.AdvertisePeerUrls { ss[i] = cfg.AdvertisePeerUrls[i].String() @@ -1032,7 +1408,7 @@ func (cfg *Config) getAdvertisePeerUrls() (ss []string) { return ss } -func (cfg *Config) getListenPeerUrls() (ss []string) { +func (cfg *Config) getListenPeerURLs() (ss []string) { ss = make([]string, len(cfg.ListenPeerUrls)) for i := range cfg.ListenPeerUrls { ss[i] = cfg.ListenPeerUrls[i].String() @@ -1040,7 +1416,7 @@ func (cfg *Config) getListenPeerUrls() (ss []string) { return ss } -func (cfg *Config) getAdvertiseClientUrls() (ss []string) { +func (cfg *Config) getAdvertiseClientURLs() (ss []string) { ss = make([]string, len(cfg.AdvertiseClientUrls)) for i := range cfg.AdvertiseClientUrls { ss[i] = cfg.AdvertiseClientUrls[i].String() @@ -1048,7 +1424,7 @@ func (cfg *Config) getAdvertiseClientUrls() (ss []string) { return ss } -func (cfg *Config) getListenClientUrls() (ss []string) { +func (cfg *Config) getListenClientURLs() (ss []string) { ss = make([]string, len(cfg.ListenClientUrls)) for i := range cfg.ListenClientUrls { ss[i] = cfg.ListenClientUrls[i].String() @@ -1056,14 +1432,6 @@ func (cfg *Config) getListenClientUrls() (ss []string) { return ss } -func (cfg *Config) getListenClientHttpUrls() (ss []string) { - ss = 
make([]string, len(cfg.ListenClientHttpUrls)) - for i := range cfg.ListenClientHttpUrls { - ss[i] = cfg.ListenClientHttpUrls[i].String() - } - return ss -} - func (cfg *Config) getMetricsURLs() (ss []string) { ss = make([]string, len(cfg.ListenMetricsUrls)) for i := range cfg.ListenMetricsUrls { diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go index 1b363897b53cc..ddf19cdbb525d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go @@ -19,21 +19,22 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/url" "os" - "go.etcd.io/etcd/client/pkg/v3/logutil" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zapgrpc" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" "gopkg.in/natefinch/lumberjack.v2" + + "go.etcd.io/etcd/client/pkg/v3/logutil" ) // GetLogger returns the logger. -func (cfg Config) GetLogger() *zap.Logger { +func (cfg *Config) GetLogger() *zap.Logger { cfg.loggerMu.RLock() l := cfg.logger cfg.loggerMu.RUnlock() @@ -106,6 +107,11 @@ func (cfg *Config) setupLogging() error { copied.ErrorOutputPaths = errOutputPaths copied = logutil.MergeOutputPaths(copied) copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel)) + encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat) + if err != nil { + return err + } + copied.Encoding = encoding if cfg.ZapLoggerBuilder == nil { lg, err := copied.Build() if err != nil { @@ -130,10 +136,22 @@ func (cfg *Config) setupLogging() error { lvl := zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel)) + var encoder zapcore.Encoder + encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat) + if err != nil { + return err + } + + if encoding == logutil.ConsoleLogFormat { + encoder = zapcore.NewConsoleEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig) + } else { + encoder = zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig) + } + // WARN: do not change field names in encoder config // journald logging writer assumes field names of "level" and "caller" cr := zapcore.NewCore( - zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig), + encoder, syncer, lvl, ) @@ -187,7 +205,7 @@ func (cfg *Config) setupLogging() error { return nil } -// NewZapLoggerBuilder generates a zap logger builder that sets given loger +// NewZapLoggerBuilder generates a zap logger builder that sets given logger // for embedded etcd. func NewZapLoggerBuilder(lg *zap.Logger) func(*Config) error { return func(cfg *Config) error { @@ -216,7 +234,7 @@ func (cfg *Config) SetupGlobalLoggers() { grpc.EnableTracing = true grpclog.SetLoggerV2(zapgrpc.NewLogger(lg)) } else { - grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr)) + grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, os.Stderr, os.Stderr)) } zap.ReplaceGlobals(lg) } @@ -231,7 +249,7 @@ func (logRotationConfig) Sync() error { return nil } // setupLogRotation initializes log rotation for a single file path target. 
func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error { - var logRotationConfig logRotationConfig + var logRotationCfg logRotationConfig outputFilePaths := 0 for _, v := range logOutputs { switch v { @@ -250,7 +268,7 @@ func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error { return ErrLogRotationInvalidLogOutput } - if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationConfig); err != nil { + if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationCfg); err != nil { var unmarshalTypeError *json.UnmarshalTypeError var syntaxError *json.SyntaxError switch { @@ -258,11 +276,13 @@ return fmt.Errorf("improperly formatted log rotation config: %w", err) case errors.As(err, &unmarshalTypeError): return fmt.Errorf("invalid log rotation config: %w", err) + default: + return fmt.Errorf("failed to unmarshal log rotation config: %w", err) } } zap.RegisterSink("rotate", func(u *url.URL) (zap.Sink, error) { - logRotationConfig.Filename = u.Path[1:] - return &logRotationConfig, nil + logRotationCfg.Filename = u.Path[1:] + return &logRotationCfg, nil }) return nil } diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_unix.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_unix.go index 76bb73265fb4e..478dc65d60dc0 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_unix.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_unix.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows -// +build !windows package embed @@ -21,16 +20,16 @@ import ( "fmt" "os" - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.uber.org/zap/zapcore" + + "go.etcd.io/etcd/client/pkg/v3/logutil" ) // use stderr as fallback func getJournalWriteSyncer() (zapcore.WriteSyncer, error) { jw, err := logutil.NewJournalWriter(os.Stderr) if err != nil { - return nil, fmt.Errorf("can't find journal (%v)", err) + return nil, fmt.Errorf("can't find journal (%w)", err) } return zapcore.AddSync(jw), nil } diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_windows.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_windows.go index 58ed08631bbad..90dfad944e44e 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_windows.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_windows.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build windows -// +build windows package embed diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go index 382bc00e766bf..7fd86e8610f0e 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go @@ -1,4 +1,4 @@ -// Copyright 2022 The etcd Authors +// Copyright 2021 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.uber.org/zap" ) @@ -98,6 +98,7 @@ func newTracingExporter(ctx context.Context, cfg *Config) (*tracingExporter, err zap.String("address", cfg.ExperimentalDistributedTracingAddress), zap.String("service-name", cfg.ExperimentalDistributedTracingServiceName), zap.String("service-instance-id", cfg.ExperimentalDistributedTracingServiceInstanceID), + zap.Int("sampling-rate", cfg.ExperimentalDistributedTracingSamplingRatePerMillion), ) return &tracingExporter{ @@ -111,7 +112,6 @@ func (te *tracingExporter) Close(ctx context.Context) { if te.provider != nil { te.provider.Shutdown(ctx) } - if te.exporter != nil { te.exporter.Shutdown(ctx) } diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go b/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go index b2c7fee4482c9..2f3d47d0a429a 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go @@ -16,10 +16,9 @@ package embed import ( "context" - "crypto/tls" "errors" "fmt" - "io/ioutil" + "io" defaultLog "log" "math" "net" @@ -28,9 +27,17 @@ import ( "runtime" "sort" "strconv" + "strings" "sync" "time" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/soheilhy/cmux" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/keepalive" + "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/types" @@ -41,18 +48,9 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3client" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" + "go.etcd.io/etcd/server/v3/features" + "go.etcd.io/etcd/server/v3/storage" "go.etcd.io/etcd/server/v3/verify" - - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/soheilhy/cmux" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/keepalive" ) const ( @@ -127,7 +125,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { } e.cfg.logger.Info( "configuring peer listeners", - zap.Strings("listen-peer-urls", e.cfg.getListenPeerUrls()), + zap.Strings("listen-peer-urls", e.cfg.getListenPeerURLs()), ) if e.Peers, err = configurePeerListeners(cfg); err != nil { return e, err @@ -135,7 +133,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { e.cfg.logger.Info( "configuring client listeners", - zap.Strings("listen-client-urls", e.cfg.getListenClientUrls()), + zap.Strings("listen-client-urls", e.cfg.getListenClientURLs()), ) if e.sctxs, err = configureClientListeners(cfg); err != nil { return e, err @@ -154,7 +152,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { memberInitialized = false urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd") if err != nil { - return e, fmt.Errorf("error setting up initial cluster: %v", err) + return e, fmt.Errorf("error setting up initial cluster: %w", err) } } @@ -183,6 +181,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { InitialClusterToken: token, DiscoveryURL: cfg.Durl, DiscoveryProxy: 
cfg.Dproxy, + DiscoveryCfg: cfg.DiscoveryCfg, NewCluster: cfg.IsNewCluster(), PeerTLSInfo: cfg.PeerTLSInfo, TickMs: cfg.TickMs, @@ -205,7 +204,6 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { TokenTTL: cfg.AuthTokenTTL, CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist, - InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck, CorruptCheckTime: cfg.ExperimentalCorruptCheckTime, CompactHashCheckEnabled: cfg.ExperimentalCompactHashCheckEnabled, CompactHashCheckTime: cfg.ExperimentalCompactHashCheckTime, @@ -222,27 +220,34 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval, DowngradeCheckTime: cfg.ExperimentalDowngradeCheckTime, WarningApplyDuration: cfg.ExperimentalWarningApplyDuration, + WarningUnaryRequestDuration: cfg.WarningUnaryRequestDuration, ExperimentalMemoryMlock: cfg.ExperimentalMemoryMlock, ExperimentalTxnModeWriteWithSharedBuffer: cfg.ExperimentalTxnModeWriteWithSharedBuffer, - ExperimentalStopGRPCServiceOnDefrag: cfg.ExperimentalStopGRPCServiceOnDefrag, ExperimentalBootstrapDefragThresholdMegabytes: cfg.ExperimentalBootstrapDefragThresholdMegabytes, - V2Deprecation: cfg.V2DeprecationEffective(), + ExperimentalMaxLearners: cfg.ExperimentalMaxLearners, + V2Deprecation: cfg.V2DeprecationEffective(), + ExperimentalLocalAddress: cfg.InferLocalAddr(), + ServerFeatureGate: cfg.ServerFeatureGate, } if srvcfg.ExperimentalEnableDistributedTracing { tctx := context.Background() - tracingExporter, err := newTracingExporter(tctx, cfg) - if err != nil { - return e, err + tracingExporter, terr := newTracingExporter(tctx, cfg) + if terr != nil { + return e, terr } e.tracingExporterShutdown = func() { tracingExporter.Close(tctx) } srvcfg.ExperimentalTracerOptions = tracingExporter.opts - e.cfg.logger.Info("distributed tracing setup enabled") + e.cfg.logger.Info( + "distributed tracing setup enabled", + ) } + srvcfg.PeerTLSInfo.LocalAddr = srvcfg.ExperimentalLocalAddress + print(e.cfg.logger, *cfg, srvcfg, memberInitialized) if e.Server, err = etcdserver.NewServer(srvcfg); err != nil { @@ -254,7 +259,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { // newly started member ("memberInitialized==false") // does not need corruption check - if memberInitialized && srvcfg.InitialCorruptCheck { + if memberInitialized && srvcfg.ServerFeatureGate.Enabled(features.InitialCorruptCheck) { if err = e.Server.CorruptionChecker().InitialCheck(); err != nil { // set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()" // (nothing to close since rafthttp transports have not been started) @@ -267,23 +272,21 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { } e.Server.Start() - if err = e.servePeers(); err != nil { - return e, err - } - if err = e.serveClients(); err != nil { - return e, err - } + e.servePeers() + + e.serveClients() + if err = e.serveMetrics(); err != nil { return e, err } e.cfg.logger.Info( "now serving peer/client/metrics", - zap.String("local-member-id", e.Server.ID().String()), - zap.Strings("initial-advertise-peer-urls", e.cfg.getAdvertisePeerUrls()), - zap.Strings("listen-peer-urls", e.cfg.getListenPeerUrls()), - zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientUrls()), - zap.Strings("listen-client-urls", e.cfg.getListenClientUrls()), + zap.String("local-member-id", e.Server.MemberID().String()), + zap.Strings("initial-advertise-peer-urls", e.cfg.getAdvertisePeerURLs()), + zap.Strings("listen-peer-urls", e.cfg.getListenPeerURLs()), + 
zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientURLs()), + zap.Strings("listen-client-urls", e.cfg.getListenClientURLs()), zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()), ) serving = true @@ -305,7 +308,7 @@ func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized quota := ec.QuotaBackendBytes if quota == 0 { - quota = etcdserver.DefaultQuotaBytes + quota = storage.DefaultQuotaBytes } lg.Info( @@ -331,11 +334,13 @@ func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized zap.Uint("max-wals", sc.MaxWALFiles), zap.Uint("max-snapshots", sc.MaxSnapFiles), zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries), - zap.Strings("initial-advertise-peer-urls", ec.getAdvertisePeerUrls()), - zap.Strings("listen-peer-urls", ec.getListenPeerUrls()), - zap.Strings("advertise-client-urls", ec.getAdvertiseClientUrls()), - zap.Strings("listen-client-urls", ec.getListenClientUrls()), + zap.Strings("initial-advertise-peer-urls", ec.getAdvertisePeerURLs()), + zap.Strings("listen-peer-urls", ec.getListenPeerURLs()), + zap.Strings("advertise-client-urls", ec.getAdvertiseClientURLs()), + zap.Strings("listen-client-urls", ec.getListenClientURLs()), zap.Strings("listen-metrics-urls", ec.getMetricsURLs()), + zap.Bool("experimental-set-member-localaddr", ec.ExperimentalSetMemberLocalAddr), + zap.String("experimental-local-address", sc.ExperimentalLocalAddress), zap.Strings("cors", cors), zap.Strings("host-whitelist", hss), zap.String("initial-cluster", sc.InitialPeerURLsMap.String()), @@ -355,7 +360,22 @@ func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()), zap.String("discovery-url", sc.DiscoveryURL), zap.String("discovery-proxy", sc.DiscoveryProxy), + + zap.String("discovery-token", sc.DiscoveryCfg.Token), + zap.String("discovery-endpoints", strings.Join(sc.DiscoveryCfg.Endpoints, ",")), + zap.String("discovery-dial-timeout", sc.DiscoveryCfg.DialTimeout.String()), + zap.String("discovery-request-timeout", sc.DiscoveryCfg.RequestTimeout.String()), + zap.String("discovery-keepalive-time", sc.DiscoveryCfg.KeepAliveTime.String()), + zap.String("discovery-keepalive-timeout", sc.DiscoveryCfg.KeepAliveTimeout.String()), + zap.Bool("discovery-insecure-transport", sc.DiscoveryCfg.Secure.InsecureTransport), + zap.Bool("discovery-insecure-skip-tls-verify", sc.DiscoveryCfg.Secure.InsecureSkipVerify), + zap.String("discovery-cert", sc.DiscoveryCfg.Secure.Cert), + zap.String("discovery-key", sc.DiscoveryCfg.Secure.Key), + zap.String("discovery-cacert", sc.DiscoveryCfg.Secure.Cacert), + zap.String("discovery-user", sc.DiscoveryCfg.Auth.Username), + zap.String("downgrade-check-interval", sc.DowngradeCheckTime.String()), + zap.Int("max-learners", sc.ExperimentalMaxLearners), ) } @@ -371,8 +391,8 @@ func (e *Etcd) Close() { fields := []zap.Field{ zap.String("name", e.cfg.Name), zap.String("data-dir", e.cfg.Dir), - zap.Strings("advertise-peer-urls", e.cfg.getAdvertisePeerUrls()), - zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientUrls()), + zap.Strings("advertise-peer-urls", e.cfg.getAdvertisePeerURLs()), + zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientURLs()), } lg := e.GetLogger() lg.Info("closing etcd server", fields...) 
@@ -490,9 +510,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) { if err = cfg.PeerSelfCert(); err != nil { cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err)) } - updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion) - if !cfg.PeerTLSInfo.Empty() { cfg.logger.Info( "starting with peer TLS", @@ -548,24 +566,16 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) { } // configure peer handlers after rafthttp.Transport started -func (e *Etcd) servePeers() (err error) { +func (e *Etcd) servePeers() { ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server) - var peerTLScfg *tls.Config - if !e.cfg.PeerTLSInfo.Empty() { - if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil { - return err - } - } for _, p := range e.Peers { u := p.Listener.Addr().String() - gs := v3rpc.Server(e.Server, peerTLScfg, nil) m := cmux.New(p.Listener) - go gs.Serve(m.Match(cmux.HTTP2())) srv := &http.Server{ - Handler: grpcHandlerFunc(gs, ph), + Handler: ph, ReadTimeout: 5 * time.Minute, - ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error + ErrorLog: defaultLog.New(io.Discard, "", 0), // do not log user error } go srv.Serve(m.Match(cmux.Any())) p.serve = func() error { @@ -583,7 +593,7 @@ func (e *Etcd) servePeers() (err error) { "stopping serving peer traffic", zap.String("address", u), ) - stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv}) + srv.Shutdown(ctx) e.cfg.logger.Info( "stopped serving peer traffic", zap.String("address", u), @@ -604,7 +614,6 @@ func (e *Etcd) servePeers() (err error) { e.errHandler(l.serve()) }(pl) } - return nil } func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { @@ -614,9 +623,7 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro if err = cfg.ClientSelfCert(); err != nil { cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err)) } - updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion) - if cfg.EnablePprof { cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf)) } @@ -637,7 +644,7 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro } for _, u := range cfg.ListenClientUrls { - addr, secure, network := resolveUrl(u) + addr, secure, network := resolveURL(u) sctx := sctxs[addr] if sctx == nil { sctx = newServeCtx(cfg.logger) @@ -650,14 +657,14 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro sctx.network = network } for _, u := range cfg.ListenClientHttpUrls { - addr, secure, network := resolveUrl(u) + addr, secure, network := resolveURL(u) sctx := sctxs[addr] if sctx == nil { sctx = newServeCtx(cfg.logger) sctxs[addr] = sctx } else if !sctx.httpOnly { - return nil, fmt.Errorf("cannot bind both --client-listen-urls and --client-listen-http-urls on the same url %s", u.String()) + return nil, fmt.Errorf("cannot bind both --listen-client-urls and --listen-client-http-urls on the same url %s", u.String()) } sctx.secure = sctx.secure || secure sctx.insecure = sctx.insecure || !secure @@ -713,7 +720,7 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro return sctxs, nil } -func resolveUrl(u url.URL) (addr string, secure bool, network string) { +func resolveURL(u url.URL) (addr string, secure bool, network string) { addr = u.Host network = "tcp" if u.Scheme == "unix" || u.Scheme == "unixs" { @@ -724,7 +731,7 
@@ func resolveUrl(u url.URL) (addr string, secure bool, network string) { return addr, secure, network } -func (e *Etcd) serveClients() (err error) { +func (e *Etcd) serveClients() { if !e.cfg.ClientTLSInfo.Empty() { e.cfg.logger.Info( "starting with client TLS", @@ -734,28 +741,13 @@ func (e *Etcd) serveClients() (err error) { } // Start a client server goroutine for each listen address - var h http.Handler - if e.Config().EnableV2 { - if e.Config().V2DeprecationEffective().IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) { - return fmt.Errorf("--enable-v2 and --v2-deprecation=%s are mutually exclusive", e.Config().V2DeprecationEffective()) - } - e.cfg.logger.Warn("Flag `enable-v2` is deprecated and will get removed in etcd 3.6.") - if len(e.Config().ExperimentalEnableV2V3) > 0 { - e.cfg.logger.Warn("Flag `experimental-enable-v2v3` is deprecated and will get removed in etcd 3.6.") - srv := v2v3.NewServer(e.cfg.logger, v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3) - h = v2http.NewClientHandler(e.GetLogger(), srv, e.Server.Cfg.ReqTimeout()) - } else { - h = v2http.NewClientHandler(e.GetLogger(), e.Server, e.Server.Cfg.ReqTimeout()) - } - } else { - mux := http.NewServeMux() - etcdhttp.HandleBasic(e.cfg.logger, mux, e.Server) - etcdhttp.HandleMetrics(mux) - etcdhttp.HandleHealth(e.cfg.logger, mux, e.Server) - h = mux - } + mux := http.NewServeMux() + etcdhttp.HandleDebug(mux) + etcdhttp.HandleVersion(mux, e.Server) + etcdhttp.HandleMetrics(mux) + etcdhttp.HandleHealth(e.cfg.logger, mux, e.Server) - gopts := []grpc.ServerOption{} + var gopts []grpc.ServerOption if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) { gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ MinTime: e.cfg.GRPCKeepAliveMinTime, @@ -769,34 +761,33 @@ func (e *Etcd) serveClients() (err error) { Timeout: e.cfg.GRPCKeepAliveTimeout, })) } + gopts = append(gopts, e.cfg.GRPCAdditionalServerOptions...) 
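The gopts assembly above maps the GRPCKeepAlive* config fields onto grpc-go server options. Reduced to a standalone sketch (the durations mirror etcd's documented defaults of 5s/2h/20s, but are illustrative here):

package main

import (
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/keepalive"
)

func keepaliveServerOpts() []grpc.ServerOption {
    return []grpc.ServerOption{
        // Reject client keepalive pings arriving more often than every 5s
        // (corresponds to --grpc-keepalive-min-time).
        grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
            MinTime: 5 * time.Second,
        }),
        // Ping idle clients every 2h and close a connection whose ping goes
        // unanswered for 20s (--grpc-keepalive-interval and -timeout).
        grpc.KeepaliveParams(keepalive.ServerParameters{
            Time:    2 * time.Hour,
            Timeout: 20 * time.Second,
        }),
    }
}

func main() { _ = keepaliveServerOpts() }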
- splitHttp := false + splitHTTP := false for _, sctx := range e.sctxs { if sctx.httpOnly { - splitHttp = true + splitHTTP = true } } // start client servers in each goroutine for _, sctx := range e.sctxs { go func(s *serveCtx) { - e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, e.grpcGatewayDial(splitHttp), splitHttp, gopts...)) + e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, mux, e.errHandler, e.grpcGatewayDial(splitHTTP), splitHTTP, gopts...)) }(sctx) } - return nil } -func (e *Etcd) grpcGatewayDial(splitHttp bool) (grpcDial func(ctx context.Context) (*grpc.ClientConn, error)) { +func (e *Etcd) grpcGatewayDial(splitHTTP bool) (grpcDial func(ctx context.Context) (*grpc.ClientConn, error)) { if !e.cfg.EnableGRPCGateway { return nil } - sctx := e.pickGrpcGatewayServeContext(splitHttp) + sctx := e.pickGRPCGatewayServeContext(splitHTTP) addr := sctx.addr if network := sctx.network; network == "unix" { // explicitly define unix network for gRPC socket support addr = fmt.Sprintf("%s:%s", network, addr) } - opts := []grpc.DialOption{grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32))} if sctx.secure { tlscfg, tlsErr := e.cfg.ClientTLSInfo.ServerConfig() @@ -808,8 +799,7 @@ func (e *Etcd) grpcGatewayDial(splitHttp bool) (grpcDial func(ctx context.Contex dtls := tlscfg.Clone() // trust local server dtls.InsecureSkipVerify = true - bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls}) - opts = append(opts, grpc.WithTransportCredentials(bundle.TransportCredentials())) + opts = append(opts, grpc.WithTransportCredentials(credentials.NewTransportCredential(dtls))) } else { opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } @@ -824,9 +814,9 @@ func (e *Etcd) grpcGatewayDial(splitHttp bool) (grpcDial func(ctx context.Contex } } -func (e *Etcd) pickGrpcGatewayServeContext(splitHttp bool) *serveCtx { +func (e *Etcd) pickGRPCGatewayServeContext(splitHTTP bool) *serveCtx { for _, sctx := range e.sctxs { - if !splitHttp || !sctx.httpOnly { + if !splitHTTP || !sctx.httpOnly { return sctx } } @@ -880,6 +870,9 @@ func (e *Etcd) serveMetrics() (err error) { } func (e *Etcd) errHandler(err error) { + if err != nil { + e.GetLogger().Error("setting up serving from embedded etcd failed.", zap.Error(err)) + } select { case <-e.stopc: return @@ -907,12 +900,14 @@ func parseCompactionRetention(mode, retention string) (ret time.Duration, err er ret = time.Duration(int64(h)) case CompactorModePeriodic: ret = time.Duration(int64(h)) * time.Hour + case "": + return 0, errors.New("--auto-compaction-mode is undefined") } } else { // periodic compaction ret, err = time.ParseDuration(retention) if err != nil { - return 0, fmt.Errorf("error parsing CompactionRetention: %v", err) + return 0, fmt.Errorf("error parsing CompactionRetention: %w", err) } } return ret, nil diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/serve.go b/vendor/go.etcd.io/etcd/server/v3/embed/serve.go index 91ec6e9a376b2..8488a932c8ad9 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/serve.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/serve.go @@ -17,12 +17,21 @@ package embed import ( "context" "fmt" - "io/ioutil" + "io" defaultLog "log" "net" "net/http" "strings" + gw "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/soheilhy/cmux" + "github.com/tmc/grpc-websocket-proxy/wsproxy" + "go.uber.org/zap" + "golang.org/x/net/http2" + "golang.org/x/net/trace" + "google.golang.org/grpc" + "google.golang.org/protobuf/encoding/protojson" + etcdservergw 
"go.etcd.io/etcd/api/v3/etcdserverpb/gw" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/pkg/v3/debugutil" @@ -37,14 +46,6 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" v3lockgw "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw" "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" - - gw "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/soheilhy/cmux" - "github.com/tmc/grpc-websocket-proxy/wsproxy" - "go.uber.org/zap" - "golang.org/x/net/http2" - "golang.org/x/net/trace" - "google.golang.org/grpc" ) type serveCtx struct { @@ -95,18 +96,18 @@ func (sctx *serveCtx) serve( handler http.Handler, errHandler func(error), grpcDialForRestGatewayBackends func(ctx context.Context) (*grpc.ClientConn, error), - splitHttp bool, + splitHTTP bool, gopts ...grpc.ServerOption) (err error) { - logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) + logger := defaultLog.New(io.Discard, "etcdhttp", 0) <-s.ReadyNotify() sctx.lg.Info("ready to serve client requests") m := cmux.New(sctx.l) var server func() error - onlyGRPC := splitHttp && !sctx.httpOnly - onlyHttp := splitHttp && sctx.httpOnly - grpcEnabled := !onlyHttp + onlyGRPC := splitHTTP && !sctx.httpOnly + onlyHTTP := splitHTTP && sctx.httpOnly + grpcEnabled := !onlyHTTP httpEnabled := !onlyGRPC v3c := v3client.New(s) @@ -128,7 +129,7 @@ func (sctx *serveCtx) serve( switch { case onlyGRPC: traffic = "grpc" - case onlyHttp: + case onlyHTTP: traffic = "http" default: traffic = "grpc+http" @@ -143,7 +144,7 @@ func (sctx *serveCtx) serve( Handler: createAccessController(sctx.lg, s, httpmux), ErrorLog: logger, // do not log user error } - if err := configureHttpServer(srv, s.Cfg); err != nil { + if err = configureHTTPServer(srv, s.Cfg); err != nil { sctx.lg.Error("Configure http server failed", zap.Error(err)) return err } @@ -226,7 +227,7 @@ func (sctx *serveCtx) serve( TLSConfig: tlscfg, ErrorLog: logger, // do not log user error } - if err := configureHttpServer(srv, s.Cfg); err != nil { + if err := configureHTTPServer(srv, s.Cfg); err != nil { sctx.lg.Error("Configure https server failed", zap.Error(err)) return err } @@ -257,7 +258,7 @@ func (sctx *serveCtx) serve( return server() } -func configureHttpServer(srv *http.Server, cfg config.ServerConfig) error { +func configureHTTPServer(srv *http.Server, cfg config.ServerConfig) error { // todo (ahrtr): should we support configuring other parameters in the future as well? 
return http2.ConfigureServer(srv, &http2.Server{ MaxConcurrentStreams: cfg.MaxConcurrentStreams, @@ -290,7 +291,23 @@ func (sctx *serveCtx) registerGateway(dial func(ctx context.Context) (*grpc.Clie if err != nil { return nil, err } - gwmux := gw.NewServeMux() + + // Refer to https://grpc-ecosystem.github.io/grpc-gateway/docs/mapping/customizing_your_gateway/ + gwmux := gw.NewServeMux( + gw.WithMarshalerOption(gw.MIMEWildcard, + &gw.HTTPBodyMarshaler{ + Marshaler: &gw.JSONPb{ + MarshalOptions: protojson.MarshalOptions{ + UseProtoNames: true, + EmitUnpopulated: false, + }, + UnmarshalOptions: protojson.UnmarshalOptions{ + DiscardUnknown: true, + }, + }, + }, + ), + ) handlers := []registerHandlerFunc{ etcdservergw.RegisterKVHandler, @@ -325,11 +342,11 @@ type wsProxyZapLogger struct { *zap.Logger } -func (w wsProxyZapLogger) Warnln(i ...interface{}) { +func (w wsProxyZapLogger) Warnln(i ...any) { w.Warn(fmt.Sprint(i...)) } -func (w wsProxyZapLogger) Debugln(i ...interface{}) { +func (w wsProxyZapLogger) Debugln(i ...any) { w.Debug(fmt.Sprint(i...)) } @@ -419,7 +436,7 @@ func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request) addCORSHeader(rw, origin) } - if req.Method == "OPTIONS" { + if req.Method == http.MethodOptions { rw.WriteHeader(http.StatusOK) return } @@ -469,7 +486,7 @@ func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { addCORSHeader(rw, origin) } - if req.Method == "OPTIONS" { + if req.Method == http.MethodOptions { rw.WriteHeader(http.StatusOK) return } diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/util.go b/vendor/go.etcd.io/etcd/server/v3/embed/util.go index ad46153455151..32efbe67a46f0 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/util.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/util.go @@ -17,13 +17,13 @@ package embed import ( "path/filepath" - "go.etcd.io/etcd/server/v3/wal" + "go.etcd.io/etcd/server/v3/storage/wal" ) func isMemberInitialized(cfg *Config) bool { - waldir := cfg.WalDir - if waldir == "" { - waldir = filepath.Join(cfg.Dir, "member", "wal") + walDir := cfg.WalDir + if walDir == "" { + walDir = filepath.Join(cfg.Dir, "member", "wal") } - return wal.Exist(waldir) + return wal.Exist(walDir) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/adapters.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/adapters.go new file mode 100644 index 0000000000000..35660a27bd0b1 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/adapters.go @@ -0,0 +1,89 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
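Context for the registerGateway hunk above: grpc-gateway v2 switched to protojson, which by default emits camelCase JSON field names, so without these marshaler options the REST responses would rename every field relative to the v1 gateway. Restated as a standalone sketch:

package main

import (
    gw "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/protobuf/encoding/protojson"
)

func newGatewayMux() *gw.ServeMux {
    return gw.NewServeMux(
        gw.WithMarshalerOption(gw.MIMEWildcard, &gw.HTTPBodyMarshaler{
            Marshaler: &gw.JSONPb{
                MarshalOptions: protojson.MarshalOptions{
                    UseProtoNames:   true,  // keep proto names, e.g. "member_id" rather than "memberId"
                    EmitUnpopulated: false, // omit zero-valued fields from responses
                },
                UnmarshalOptions: protojson.UnmarshalOptions{
                    DiscardUnknown: true, // tolerate unknown fields in requests
                },
            },
        }),
    )
}

func main() { _ = newGatewayMux() }

UseProtoNames preserves the snake_case field names existing clients already parse, while DiscardUnknown keeps requests carrying unknown fields from failing outright.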
+ +package etcdserver + +import ( + "context" + + "github.com/coreos/go-semver/semver" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/membershippb" + "go.etcd.io/etcd/api/v3/version" + serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" + "go.etcd.io/etcd/server/v3/storage/schema" +) + +// serverVersionAdapter implements the interface Server defined in package +// go.etcd.io/etcd/server/v3/etcdserver/version, and it's needed by Monitor +// in the same package. +type serverVersionAdapter struct { + *EtcdServer +} + +func NewServerVersionAdapter(s *EtcdServer) *serverVersionAdapter { + return &serverVersionAdapter{ + EtcdServer: s, + } +} + +var _ serverversion.Server = (*serverVersionAdapter)(nil) + +func (s *serverVersionAdapter) UpdateClusterVersion(version string) { + s.GoAttach(func() { s.updateClusterVersionV3(version) }) +} + +func (s *serverVersionAdapter) LinearizableReadNotify(ctx context.Context) error { + return s.linearizableReadNotify(ctx) +} + +func (s *serverVersionAdapter) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error { + raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()} + _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) + return err +} + +func (s *serverVersionAdapter) DowngradeCancel(ctx context.Context) error { + raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false} + _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) + return err +} + +func (s *serverVersionAdapter) GetClusterVersion() *semver.Version { + return s.cluster.Version() +} + +func (s *serverVersionAdapter) GetDowngradeInfo() *serverversion.DowngradeInfo { + return s.cluster.DowngradeInfo() +} + +func (s *serverVersionAdapter) GetMembersVersions() map[string]*version.Versions { + return getMembersVersions(s.lg, s.cluster, s.MemberID(), s.peerRt, s.Cfg.ReqTimeout()) +} + +func (s *serverVersionAdapter) GetStorageVersion() *semver.Version { + return s.StorageVersion() +} + +func (s *serverVersionAdapter) UpdateStorageVersion(target semver.Version) error { + // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock. 
+ s.bemu.RLock() + defer s.bemu.RUnlock() + + tx := s.be.BatchTx() + tx.LockOutsideApply() + defer tx.Unlock() + return schema.UnsafeMigrate(s.lg, tx, s.r.storage, target) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go index ea2f0e97e4bdf..cf535ec4efaad 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go @@ -17,11 +17,11 @@ package api import ( "sync" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "github.com/coreos/go-semver/semver" "go.uber.org/zap" - "github.com/coreos/go-semver/semver" + "go.etcd.io/etcd/api/v3/version" + serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" ) type Capability string @@ -40,6 +40,7 @@ var ( "3.3.0": {AuthCapability: true, V3rpcCapability: true}, "3.4.0": {AuthCapability: true, V3rpcCapability: true}, "3.5.0": {AuthCapability: true, V3rpcCapability: true}, + "3.6.0": {AuthCapability: true, V3rpcCapability: true}, } enableMapMu sync.RWMutex @@ -63,7 +64,7 @@ func UpdateCapability(lg *zap.Logger, v *semver.Version) { return } enableMapMu.Lock() - if curVersion != nil && !membership.IsValidVersionChange(v, curVersion) { + if curVersion != nil && !serverversion.IsValidClusterVersionChange(curVersion, v) { enableMapMu.Unlock() return } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/base.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/base.go deleted file mode 100644 index dcfa3f0695942..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/base.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdhttp - -import ( - "encoding/json" - "expvar" - "fmt" - "net/http" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" - "go.uber.org/zap" -) - -const ( - configPath = "/config" - varsPath = "/debug/vars" - versionPath = "/version" -) - -// HandleBasic adds handlers to a mux for serving JSON etcd client requests -// that do not access the v2 store. 
-func HandleBasic(lg *zap.Logger, mux *http.ServeMux, server etcdserver.ServerPeer) { - mux.HandleFunc(varsPath, serveVars) - mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) -} - -func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - v := c.Version() - if v != nil { - fn(w, r, v.String()) - } else { - fn(w, r, "not_decided") - } - } -} - -func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { - if !allowMethod(w, r, "GET") { - return - } - vs := version.Versions{ - Server: version.Version, - Cluster: clusterV, - } - - w.Header().Set("Content-Type", "application/json") - b, err := json.Marshal(&vs) - if err != nil { - panic(fmt.Sprintf("cannot marshal versions to json (%v)", err)) - } - w.Write(b) -} - -func serveVars(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r, "GET") { - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} - -func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool { - if m == r.Method { - return true - } - w.Header().Set("Allow", m) - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return false -} - -// WriteError logs and writes the given Error to the ResponseWriter -// If Error is an etcdErr, it is rendered to the ResponseWriter -// Otherwise, it is assumed to be a StatusInternalServerError -func WriteError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) { - if err == nil { - return - } - switch e := err.(type) { - case *v2error.Error: - e.WriteTo(w) - - case *httptypes.HTTPError: - if et := e.WriteTo(w); et != nil { - if lg != nil { - lg.Debug( - "failed to write v2 HTTP error", - zap.String("remote-addr", r.RemoteAddr), - zap.String("internal-server-error", e.Error()), - zap.Error(et), - ) - } - } - - default: - switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, - etcdserver.ErrUnhealthy: - if lg != nil { - lg.Warn( - "v2 response error", - zap.String("remote-addr", r.RemoteAddr), - zap.String("internal-server-error", err.Error()), - ) - } - - default: - if lg != nil { - lg.Warn( - "unexpected v2 response error", - zap.String("remote-addr", r.RemoteAddr), - zap.String("internal-server-error", err.Error()), - ) - } - } - - herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") - if et := herr.WriteTo(w); et != nil { - if lg != nil { - lg.Debug( - "failed to write v2 HTTP error", - zap.String("remote-addr", r.RemoteAddr), - zap.String("internal-server-error", err.Error()), - zap.Error(et), - ) - } - } - } -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/debug.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/debug.go new file mode 100644 index 0000000000000..ab7feee97f664 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/debug.go @@ -0,0 +1,47 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdhttp + +import ( + "expvar" + "fmt" + "net/http" +) + +const ( + varsPath = "/debug/vars" +) + +func HandleDebug(mux *http.ServeMux) { + mux.HandleFunc(varsPath, serveVars) +} + +func serveVars(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "GET") { + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprint(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprint(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprint(w, "\n}\n") +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/health.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/health.go index 15655e580cc0b..ccc6d5b720849 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/health.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/health.go @@ -25,17 +25,15 @@ import ( "net/http" "path" "strings" - "time" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" - "github.com/prometheus/client_golang/prometheus" pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/raft/v3" ) const ( @@ -49,30 +47,13 @@ const ( ) type ServerHealth interface { - serverHealthV2V3 + Alarms() []*pb.AlarmMember + Leader() types.ID Range(context.Context, *pb.RangeRequest) (*pb.RangeResponse, error) Config() config.ServerConfig AuthStore() auth.AuthStore } -type serverHealthV2V3 interface { - Alarms() []*pb.AlarmMember - Leader() types.ID -} - -// HandleHealthForV2 registers metrics and health handlers for v2. -func HandleHealthForV2(lg *zap.Logger, mux *http.ServeMux, srv etcdserver.ServerV2) { - mux.Handle(PathHealth, NewHealthHandler(lg, func(ctx context.Context, excludedAlarms StringSet, serializable bool) Health { - if h := checkAlarms(lg, srv, excludedAlarms); h.Health != "true" { - return h - } - if h := checkLeader(lg, srv, serializable); h.Health != "true" { - return h - } - return checkV2API(ctx, lg, srv) - })) -} - // HandleHealth registers metrics and health handlers. it checks health by using v3 range request // and its corresponding timeout. 
func HandleHealth(lg *zap.Logger, mux *http.ServeMux, srv ServerHealth) { @@ -196,35 +177,33 @@ func getSerializableFlag(r *http.Request) bool { // TODO: etcdserver.ErrNoLeader in health API -func checkAlarms(lg *zap.Logger, srv serverHealthV2V3, excludedAlarms StringSet) Health { +func checkAlarms(lg *zap.Logger, srv ServerHealth, excludedAlarms StringSet) Health { h := Health{Health: "true"} - as := srv.Alarms() - if len(as) > 0 { - for _, v := range as { - alarmName := v.Alarm.String() - if _, found := excludedAlarms[alarmName]; found { - lg.Debug("/health excluded alarm", zap.String("alarm", v.String())) - continue - } - h.Health = "false" - switch v.Alarm { - case pb.AlarmType_NOSPACE: - h.Reason = "ALARM NOSPACE" - case pb.AlarmType_CORRUPT: - h.Reason = "ALARM CORRUPT" - default: - h.Reason = "ALARM UNKNOWN" - } - lg.Warn("serving /health false due to an alarm", zap.String("alarm", v.String())) - return h + for _, v := range srv.Alarms() { + alarmName := v.Alarm.String() + if _, found := excludedAlarms[alarmName]; found { + lg.Debug("/health excluded alarm", zap.String("alarm", v.String())) + continue + } + + h.Health = "false" + switch v.Alarm { + case pb.AlarmType_NOSPACE: + h.Reason = "ALARM NOSPACE" + case pb.AlarmType_CORRUPT: + h.Reason = "ALARM CORRUPT" + default: + h.Reason = "ALARM UNKNOWN" } + lg.Warn("serving /health false due to an alarm", zap.String("alarm", v.String())) + return h } return h } -func checkLeader(lg *zap.Logger, srv serverHealthV2V3, serializable bool) Health { +func checkLeader(lg *zap.Logger, srv ServerHealth, serializable bool) Health { h := Health{Health: "true"} if !serializable && (uint64(srv.Leader()) == raft.None) { h.Health = "false" @@ -234,21 +213,6 @@ func checkLeader(lg *zap.Logger, srv serverHealthV2V3, serializable bool) Health return h } -func checkV2API(ctx context.Context, lg *zap.Logger, srv etcdserver.ServerV2) Health { - h := Health{Health: "true"} - ctx, cancel := context.WithTimeout(ctx, time.Second) - _, err := srv.Do(ctx, pb.Request{Method: "QGET"}) - cancel() - if err != nil { - h.Health = "false" - h.Reason = fmt.Sprintf("QGET ERROR:%s", err) - lg.Warn("serving /health false; QGET fails", zap.Error(err)) - return h - } - lg.Debug("serving /health true") - return h -} - func checkAPI(ctx context.Context, lg *zap.Logger, srv ServerHealth, serializable bool) Health { h := Health{Health: "true"} cfg := srv.Config() @@ -276,7 +240,7 @@ type CheckRegistry struct { func installLivezEndpoints(lg *zap.Logger, mux *http.ServeMux, server ServerHealth) { reg := CheckRegistry{checkType: checkTypeLivez, checks: make(map[string]HealthCheck)} reg.Register("serializable_read", readCheck(server, true /* serializable */)) - reg.InstallHttpEndpoints(lg, mux) + reg.InstallHTTPEndpoints(lg, mux) } func installReadyzEndpoints(lg *zap.Logger, mux *http.ServeMux, server ServerHealth) { @@ -288,7 +252,7 @@ func installReadyzEndpoints(lg *zap.Logger, mux *http.ServeMux, server ServerHea reg.Register("serializable_read", readCheck(server, true)) // linearizable_read check would be replaced by read_index check in 3.6 reg.Register("linearizable_read", readCheck(server, false)) - reg.InstallHttpEndpoints(lg, mux) + reg.InstallHTTPEndpoints(lg, mux) } func (reg *CheckRegistry) Register(name string, check HealthCheck) { @@ -299,14 +263,23 @@ func (reg *CheckRegistry) RootPath() string { return "/" + reg.checkType } +// InstallHttpEndpoints installs the http handlers for the health checks. 
+// +// Deprecated: Please use (*CheckRegistry) InstallHTTPEndpoints instead. +// +//revive:disable-next-line:var-naming func (reg *CheckRegistry) InstallHttpEndpoints(lg *zap.Logger, mux *http.ServeMux) { + reg.InstallHTTPEndpoints(lg, mux) +} + +func (reg *CheckRegistry) InstallHTTPEndpoints(lg *zap.Logger, mux *http.ServeMux) { checkNames := make([]string, 0, len(reg.checks)) for k := range reg.checks { checkNames = append(checkNames, k) } // installs the http handler for the root path. - reg.installRootHttpEndpoint(lg, mux, checkNames...) + reg.installRootHTTPEndpoint(lg, mux, checkNames...) for _, checkName := range checkNames { // installs the http handler for the individual check sub path. subpath := path.Join(reg.RootPath(), checkName) @@ -338,8 +311,8 @@ func (reg *CheckRegistry) runHealthChecks(ctx context.Context, checkNames ...str return h } -// installRootHttpEndpoint installs the http handler for the root path. -func (reg *CheckRegistry) installRootHttpEndpoint(lg *zap.Logger, mux *http.ServeMux, checks ...string) { +// installRootHTTPEndpoint installs the http handler for the root path. +func (reg *CheckRegistry) installRootHTTPEndpoint(lg *zap.Logger, mux *http.ServeMux, checks ...string) { hfunc := func(r *http.Request) HealthStatus { // extracts the health check names to be excludeList from the query param excluded := getQuerySet(r, "exclude") diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/peer.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/peer.go index badc98634b1ff..de5948d30f502 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/peer.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/peer.go @@ -16,19 +16,21 @@ package etcdhttp import ( "encoding/json" + errorspkg "errors" "fmt" "net/http" "strconv" "strings" + "go.uber.org/zap" + "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/api" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.etcd.io/etcd/server/v3/lease/leasehttp" - - "go.uber.org/zap" ) const ( @@ -71,7 +73,7 @@ func newPeerHandler( if hashKVHandler != nil { mux.Handle(etcdserver.PeerHashKVPath, hashKVHandler) } - mux.HandleFunc(versionPath, versionHandler(s.Cluster(), serveVersion)) + mux.HandleFunc(versionPath, versionHandler(s, serveVersion)) return mux } @@ -137,15 +139,15 @@ func (h *peerMemberPromoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ resp, err := h.server.PromoteMember(r.Context(), id) if err != nil { - switch err { - case membership.ErrIDNotFound: + switch { + case errorspkg.Is(err, membership.ErrIDNotFound): http.Error(w, err.Error(), http.StatusNotFound) - case membership.ErrMemberNotLearner: + case errorspkg.Is(err, membership.ErrMemberNotLearner): http.Error(w, err.Error(), http.StatusPreconditionFailed) - case etcdserver.ErrLearnerNotReady: + case errorspkg.Is(err, errors.ErrLearnerNotReady): http.Error(w, err.Error(), http.StatusPreconditionFailed) default: - WriteError(h.lg, w, r, err) + writeError(h.lg, w, r, err) } h.lg.Warn( "failed to promote a member", diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes/errors.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types/errors.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes/errors.go rename to 
vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types/errors.go diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/utils.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/utils.go new file mode 100644 index 0000000000000..082fa5a9b2d02 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/utils.go @@ -0,0 +1,99 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdhttp + +import ( + errorspkg "errors" + "net/http" + + "go.uber.org/zap" + + httptypes "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types" + "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" + "go.etcd.io/etcd/server/v3/etcdserver/errors" +) + +func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool { + if m == r.Method { + return true + } + w.Header().Set("Allow", m) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return false +} + +// writeError logs and writes the given Error to the ResponseWriter +// If Error is an etcdErr, it is rendered to the ResponseWriter +// Otherwise, it is assumed to be a StatusInternalServerError +func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) { + if err == nil { + return + } + var v2Err *v2error.Error + var httpErr *httptypes.HTTPError + switch { + case errorspkg.As(err, &v2Err): + v2Err.WriteTo(w) + + case errorspkg.As(err, &httpErr): + if et := httpErr.WriteTo(w); et != nil { + if lg != nil { + lg.Debug( + "failed to write v2 HTTP error", + zap.String("remote-addr", r.RemoteAddr), + zap.String("internal-server-error", httpErr.Error()), + zap.Error(et), + ) + } + } + + default: + switch { + case + errorspkg.Is(err, errors.ErrTimeoutDueToLeaderFail), + errorspkg.Is(err, errors.ErrTimeoutDueToConnectionLost), + errorspkg.Is(err, errors.ErrNotEnoughStartedMembers), + errorspkg.Is(err, errors.ErrUnhealthy): + if lg != nil { + lg.Warn( + "v2 response error", + zap.String("remote-addr", r.RemoteAddr), + zap.String("internal-server-error", err.Error()), + ) + } + + default: + if lg != nil { + lg.Warn( + "unexpected v2 response error", + zap.String("remote-addr", r.RemoteAddr), + zap.String("internal-server-error", err.Error()), + ) + } + } + + herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") + if et := herr.WriteTo(w); et != nil { + if lg != nil { + lg.Debug( + "failed to write v2 HTTP error", + zap.String("remote-addr", r.RemoteAddr), + zap.String("internal-server-error", err.Error()), + zap.Error(et), + ) + } + } + } +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/version.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/version.go new file mode 100644 index 0000000000000..8090703a0edfc --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/version.go @@ -0,0 +1,65 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except 
in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdhttp + +import ( + "encoding/json" + "fmt" + "net/http" + + "go.etcd.io/etcd/api/v3/version" + "go.etcd.io/etcd/server/v3/etcdserver" +) + +const ( + versionPath = "/version" +) + +func HandleVersion(mux *http.ServeMux, server etcdserver.Server) { + mux.HandleFunc(versionPath, versionHandler(server, serveVersion)) +} + +func versionHandler(server etcdserver.Server, fn func(http.ResponseWriter, *http.Request, string, string)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + clusterVersion := server.ClusterVersion() + storageVersion := server.StorageVersion() + clusterVersionStr, storageVersionStr := "not_decided", "unknown" + if clusterVersion != nil { + clusterVersionStr = clusterVersion.String() + } + if storageVersion != nil { + storageVersionStr = storageVersion.String() + } + fn(w, r, clusterVersionStr, storageVersionStr) + } +} + +func serveVersion(w http.ResponseWriter, r *http.Request, clusterV, storageV string) { + if !allowMethod(w, r, "GET") { + return + } + vs := version.Versions{ + Server: version.Version, + Cluster: clusterV, + Storage: storageV, + } + + w.Header().Set("Content-Type", "application/json") + b, err := json.Marshal(&vs) + if err != nil { + panic(fmt.Sprintf("cannot marshal versions to json (%v)", err)) + } + w.Write(b) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go index e177a63029f33..e35375a22c6a2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go @@ -15,36 +15,30 @@ package membership import ( - "bytes" "context" "crypto/sha1" "encoding/binary" "encoding/json" - "errors" "fmt" - "path" "sort" "strings" "sync" "time" + "github.com/coreos/go-semver/semver" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/netutil" - "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" + "go.etcd.io/etcd/pkg/v3/notify" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" - - "github.com/coreos/go-semver/semver" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" + serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" + "go.etcd.io/raft/v3" + "go.etcd.io/raft/v3/raftpb" ) -const maxLearners = 1 - // RaftCluster is a list of Members that belong to the same raft cluster type RaftCluster struct { lg *zap.Logger @@ -53,7 +47,7 @@ type RaftCluster struct { cid types.ID v2store v2store.Store - be backend.Backend + be MembershipBackend sync.Mutex // guards the fields below version *semver.Version @@ -62,7 +56,9 @@ type RaftCluster struct { // removed id cannot be reused. 
removed map[types.ID]bool - downgradeInfo *DowngradeInfo + downgradeInfo *serverversion.DowngradeInfo + maxLearners int + versionChanged *notify.Notifier } // ConfigChangeContext represents a context for confChange. @@ -83,8 +79,8 @@ const ( // NewClusterFromURLsMap creates a new raft cluster using provided urls map. Currently, it does not support creating // cluster with raft learner member. -func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) (*RaftCluster, error) { - c := NewCluster(lg) +func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap, opts ...ClusterOption) (*RaftCluster, error) { + c := NewCluster(lg, opts...) for name, urls := range urlsmap { m := NewMember(name, urls, token, nil) if _, ok := c.members[m.ID]; ok { @@ -99,8 +95,8 @@ func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) return c, nil } -func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member) *RaftCluster { - c := NewCluster(lg) +func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member, opts ...ClusterOption) *RaftCluster { + c := NewCluster(lg, opts...) c.cid = id for _, m := range membs { c.members[m.ID] = m @@ -108,15 +104,18 @@ func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member) *RaftCl return c } -func NewCluster(lg *zap.Logger) *RaftCluster { +func NewCluster(lg *zap.Logger, opts ...ClusterOption) *RaftCluster { if lg == nil { lg = zap.NewNop() } + clOpts := newClusterOpts(opts...) + return &RaftCluster{ lg: lg, members: make(map[types.ID]*Member), removed: make(map[types.ID]bool), - downgradeInfo: &DowngradeInfo{Enabled: false}, + downgradeInfo: &serverversion.DowngradeInfo{Enabled: false}, + maxLearners: clOpts.maxLearners, } } @@ -130,7 +129,7 @@ func (c *RaftCluster) Members() []*Member { ms = append(ms, m.Clone()) } sort.Sort(ms) - return []*Member(ms) + return ms } func (c *RaftCluster) Member(id types.ID) *Member { @@ -149,7 +148,7 @@ func (c *RaftCluster) VotingMembers() []*Member { } } sort.Sort(ms) - return []*Member(ms) + return ms } // MemberByName returns a Member with the given name if exists. 
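Note: a minimal sketch of how the ClusterOption plumbing introduced in NewCluster above is consumed. It uses only identifiers visible in this diff (WithMaxLearners and DefaultMaxLearners are defined in the new cluster_opts.go further down); the logger setup is illustrative, and since newClusterOpts applies no default cap of its own, a caller wanting the historical limit of one learner must pass it explicitly.

    package main

    import (
        "go.uber.org/zap"

        "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
    )

    func main() {
        lg, _ := zap.NewProduction()
        // Preserve the pre-option behaviour: allow at most one learner.
        c := membership.NewCluster(lg, membership.WithMaxLearners(membership.DefaultMaxLearners))
        _ = c
    }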
@@ -215,7 +214,7 @@ func (c *RaftCluster) ClientURLs() []string { func (c *RaftCluster) String() string { c.Lock() defer c.Unlock() - b := &bytes.Buffer{} + b := &strings.Builder{} fmt.Fprintf(b, "{ClusterID:%s ", c.cid) var ms []string for _, m := range c.members { @@ -243,44 +242,59 @@ func (c *RaftCluster) genID() { func (c *RaftCluster) SetID(localID, cid types.ID) { c.localID = localID c.cid = cid + c.buildMembershipMetric() } func (c *RaftCluster) SetStore(st v2store.Store) { c.v2store = st } -func (c *RaftCluster) SetBackend(be backend.Backend) { +func (c *RaftCluster) SetBackend(be MembershipBackend) { c.be = be - mustCreateBackendBuckets(c.be) + c.be.MustCreateBackendBuckets() +} + +func (c *RaftCluster) SetVersionChangedNotifier(n *notify.Notifier) { + c.versionChanged = n } func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) { c.Lock() defer c.Unlock() - if c.v2store != nil { + if c.be != nil { + c.version = c.be.ClusterVersionFromBackend() + c.members, c.removed = c.be.MustReadMembersFromBackend() + } else { c.version = clusterVersionFromStore(c.lg, c.v2store) c.members, c.removed = membersFromStore(c.lg, c.v2store) - } else { - c.version = clusterVersionFromBackend(c.lg, c.be) - c.members, c.removed = membersFromBackend(c.lg, c.be) } + c.buildMembershipMetric() if c.be != nil { - c.downgradeInfo = downgradeInfoFromBackend(c.lg, c.be) + c.downgradeInfo = c.be.DowngradeInfoFromBackend() } - d := &DowngradeInfo{Enabled: false} - if c.downgradeInfo != nil { - d = &DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion} + sv := semver.Must(semver.NewVersion(version.Version)) + if c.downgradeInfo != nil && c.downgradeInfo.Enabled { + c.lg.Info( + "cluster is downgrading to target version", + zap.String("target-cluster-version", c.downgradeInfo.TargetVersion), + zap.String("current-server-version", sv.String()), + ) } - mustDetectDowngrade(c.lg, c.version, d) + serverversion.MustDetectDowngrade(c.lg, sv, c.version) onSet(c.lg, c.version) for _, m := range c.members { + if c.localID == m.ID { + setIsLearnerMetric(m) + } + c.lg.Info( "recovered/added member from store", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("recovered-remote-peer-id", m.ID.String()), zap.Strings("recovered-remote-peer-urls", m.PeerURLs), + zap.Bool("recovered-remote-peer-is-learner", m.IsLearner), ) } if c.version != nil { @@ -295,9 +309,9 @@ func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) { // ensures that it is still valid. func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { // TODO: this must be switched to backend as well. 
- members, removed := membersFromStore(c.lg, c.v2store) + membersMap, removedMap := membersFromStore(c.lg, c.v2store) id := types.ID(cc.NodeID) - if removed[id] { + if removedMap[id] { return ErrIDRemoved } switch cc.Type { @@ -308,19 +322,21 @@ func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { } if confChangeContext.IsPromote { // promoting a learner member to voting member - if members[id] == nil { + if membersMap[id] == nil { return ErrIDNotFound } - if !members[id].IsLearner { + if !membersMap[id].IsLearner { return ErrMemberNotLearner } } else { // adding a new member - if members[id] != nil { + if membersMap[id] != nil { return ErrIDExists } + var members []*Member urls := make(map[string]bool) - for _, m := range members { + for _, m := range membersMap { + members = append(members, m) for _, u := range m.PeerURLs { urls[u] = true } @@ -331,29 +347,24 @@ func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { } } - if confChangeContext.Member.IsLearner { // the new member is a learner - numLearners := 0 - for _, m := range members { - if m.IsLearner { - numLearners++ - } - } - if numLearners+1 > maxLearners { - return ErrTooManyLearners + if confChangeContext.Member.RaftAttributes.IsLearner && cc.Type == raftpb.ConfChangeAddLearnerNode { // the new member is a learner + scaleUpLearners := true + if err := ValidateMaxLearnerConfig(c.maxLearners, members, scaleUpLearners); err != nil { + return err } } } case raftpb.ConfChangeRemoveNode: - if members[id] == nil { + if membersMap[id] == nil { return ErrIDNotFound } case raftpb.ConfChangeUpdateNode: - if members[id] == nil { + if membersMap[id] == nil { return ErrIDNotFound } urls := make(map[string]bool) - for _, m := range members { + for _, m := range membersMap { if m.ID == id { continue } @@ -383,48 +394,37 @@ func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { func (c *RaftCluster) AddMember(m *Member, shouldApplyV3 ShouldApplyV3) { c.Lock() defer c.Unlock() - - var v2Err, beErr error if c.v2store != nil { - v2Err = unsafeSaveMemberToStore(c.lg, c.v2store, m) - if v2Err != nil { - if e, ok := v2Err.(*v2error.Error); !ok || e.ErrorCode != v2error.EcodeNodeExist { - c.lg.Panic( - "failed to save member to store", - zap.String("member-id", m.ID.String()), - zap.Error(v2Err), - ) - } - } + mustSaveMemberToStore(c.lg, c.v2store, m) } - if c.be != nil && shouldApplyV3 { - beErr = unsafeSaveMemberToBackend(c.lg, c.be, m) - if beErr != nil && !errors.Is(beErr, errMemberAlreadyExist) { - c.lg.Panic( - "failed to save member to backend", - zap.String("member-id", m.ID.String()), - zap.Error(beErr), - ) - } - } - // Panic if both storeV2 and backend report member already exist. 
- if v2Err != nil && (beErr != nil || c.be == nil) { - c.lg.Panic( - "failed to save member to store", - zap.String("member-id", m.ID.String()), - zap.Error(v2Err), - ) + + if m.ID == c.localID { + setIsLearnerMetric(m) } - c.members[m.ID] = m + if c.be != nil && shouldApplyV3 { + c.be.MustSaveMemberToBackend(m) - c.lg.Info( - "added member", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("added-peer-id", m.ID.String()), - zap.Strings("added-peer-peer-urls", m.PeerURLs), - ) + c.members[m.ID] = m + c.updateMembershipMetric(m.ID, true) + + c.lg.Info( + "added member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("added-peer-id", m.ID.String()), + zap.Strings("added-peer-peer-urls", m.PeerURLs), + zap.Bool("added-peer-is-learner", m.IsLearner), + ) + } else { + c.lg.Info( + "ignore already added member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("added-peer-id", m.ID.String()), + zap.Strings("added-peer-peer-urls", m.PeerURLs), + zap.Bool("added-peer-is-learner", m.IsLearner)) + } } // RemoveMember removes a member from the store. @@ -432,53 +432,37 @@ func (c *RaftCluster) AddMember(m *Member, shouldApplyV3 ShouldApplyV3) { func (c *RaftCluster) RemoveMember(id types.ID, shouldApplyV3 ShouldApplyV3) { c.Lock() defer c.Unlock() - var v2Err, beErr error if c.v2store != nil { - v2Err = unsafeDeleteMemberFromStore(c.v2store, id) - if v2Err != nil { - if e, ok := v2Err.(*v2error.Error); !ok || e.ErrorCode != v2error.EcodeKeyNotFound { - c.lg.Panic( - "failed to delete member from store", - zap.String("member-id", id.String()), - zap.Error(v2Err), - ) - } - } + mustDeleteMemberFromStore(c.lg, c.v2store, id) } if c.be != nil && shouldApplyV3 { - beErr = unsafeDeleteMemberFromBackend(c.be, id) - if beErr != nil && !errors.Is(beErr, errMemberNotFound) { - c.lg.Panic( - "failed to delete member from backend", - zap.String("member-id", id.String()), - zap.Error(beErr), + c.be.MustDeleteMemberFromBackend(id) + + m, ok := c.members[id] + delete(c.members, id) + c.removed[id] = true + c.updateMembershipMetric(id, false) + + if ok { + c.lg.Info( + "removed member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("removed-remote-peer-id", id.String()), + zap.Strings("removed-remote-peer-urls", m.PeerURLs), + zap.Bool("removed-remote-peer-is-learner", m.IsLearner), + ) + } else { + c.lg.Warn( + "skipped removing already removed member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("removed-remote-peer-id", id.String()), ) } - } - // Panic if both storeV2 and backend report member not found. 
- if v2Err != nil && (beErr != nil || c.be == nil) { - c.lg.Panic( - "failed to delete member from store", - zap.String("member-id", id.String()), - zap.Error(v2Err), - ) - } - - m, ok := c.members[id] - delete(c.members, id) - c.removed[id] = true - - if ok { - c.lg.Info( - "removed member", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("removed-remote-peer-id", id.String()), - zap.Strings("removed-remote-peer-urls", m.PeerURLs), - ) } else { - c.lg.Warn( - "skipped removing already removed member", + c.lg.Info( + "ignore already removed member", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("removed-remote-peer-id", id.String()), @@ -496,7 +480,7 @@ func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes, shouldApply mustUpdateMemberAttrInStore(c.lg, c.v2store, m) } if c.be != nil && shouldApplyV3 { - unsafeSaveMemberToBackend(c.lg, c.be, m) + c.be.MustSaveMemberToBackend(m) } return } @@ -524,40 +508,66 @@ func (c *RaftCluster) PromoteMember(id types.ID, shouldApplyV3 ShouldApplyV3) { c.Lock() defer c.Unlock() - c.members[id].RaftAttributes.IsLearner = false if c.v2store != nil { - mustUpdateMemberInStore(c.lg, c.v2store, c.members[id]) + m := *(c.members[id]) + m.RaftAttributes.IsLearner = false + mustUpdateMemberInStore(c.lg, c.v2store, &m) } - if c.be != nil && shouldApplyV3 { - unsafeSaveMemberToBackend(c.lg, c.be, c.members[id]) + + if id == c.localID { + isLearner.Set(0) } - c.lg.Info( - "promote member", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - ) + if c.be != nil && shouldApplyV3 { + c.members[id].RaftAttributes.IsLearner = false + c.updateMembershipMetric(id, true) + c.be.MustSaveMemberToBackend(c.members[id]) + + c.lg.Info( + "promote member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + ) + } else { + c.lg.Info( + "ignore already promoted member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + ) + } } func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes, shouldApplyV3 ShouldApplyV3) { c.Lock() defer c.Unlock() - c.members[id].RaftAttributes = raftAttr if c.v2store != nil { - mustUpdateMemberInStore(c.lg, c.v2store, c.members[id]) + m := *(c.members[id]) + m.RaftAttributes = raftAttr + mustUpdateMemberInStore(c.lg, c.v2store, &m) } if c.be != nil && shouldApplyV3 { - unsafeSaveMemberToBackend(c.lg, c.be, c.members[id]) - } + c.members[id].RaftAttributes = raftAttr + c.be.MustSaveMemberToBackend(c.members[id]) - c.lg.Info( - "updated member", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("updated-remote-peer-id", id.String()), - zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs), - ) + c.lg.Info( + "updated member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("updated-remote-peer-id", id.String()), + zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs), + zap.Bool("updated-remote-peer-is-learner", raftAttr.IsLearner), + ) + } else { + c.lg.Info( + "ignored already updated member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("updated-remote-peer-id", id.String()), + zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs), + 
zap.Bool("updated-remote-peer-is-learner", raftAttr.IsLearner), + ) + } } func (c *RaftCluster) Version() *semver.Version { @@ -590,17 +600,21 @@ func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *s } oldVer := c.version c.version = ver - mustDetectDowngrade(c.lg, c.version, c.downgradeInfo) + sv := semver.Must(semver.NewVersion(version.Version)) + serverversion.MustDetectDowngrade(c.lg, sv, c.version) if c.v2store != nil { mustSaveClusterVersionToStore(c.lg, c.v2store, ver) } if c.be != nil && shouldApplyV3 { - mustSaveClusterVersionToBackend(c.be, ver) + c.be.MustSaveClusterVersionToBackend(ver) } if oldVer != nil { ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(oldVer.String())}).Set(0) } ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(ver.String())}).Set(1) + if c.versionChanged != nil { + c.versionChanged.Notify() + } onSet(c.lg, ver) } @@ -729,78 +743,6 @@ func membersFromStore(lg *zap.Logger, st v2store.Store) (map[types.ID]*Member, m return members, removed } -func membersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool) { - return mustReadMembersFromBackend(lg, be) -} - -func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version { - e, err := st.Get(path.Join(storePrefix, "version"), false, false) - if err != nil { - if isKeyNotFound(err) { - return nil - } - lg.Panic( - "failed to get cluster version from store", - zap.String("path", path.Join(storePrefix, "version")), - zap.Error(err), - ) - } - return semver.Must(semver.NewVersion(*e.Node.Value)) -} - -// The field is populated since etcd v3.5. -func clusterVersionFromBackend(lg *zap.Logger, be backend.Backend) *semver.Version { - ckey := backendClusterVersionKey() - tx := be.ReadTx() - tx.RLock() - defer tx.RUnlock() - keys, vals := tx.UnsafeRange(buckets.Cluster, ckey, nil, 0) - if len(keys) == 0 { - return nil - } - if len(keys) != 1 { - lg.Panic( - "unexpected number of keys when getting cluster version from backend", - zap.Int("number-of-key", len(keys)), - ) - } - return semver.Must(semver.NewVersion(string(vals[0]))) -} - -// The field is populated since etcd v3.5. -func downgradeInfoFromBackend(lg *zap.Logger, be backend.Backend) *DowngradeInfo { - dkey := backendDowngradeKey() - tx := be.ReadTx() - tx.Lock() - defer tx.Unlock() - keys, vals := tx.UnsafeRange(buckets.Cluster, dkey, nil, 0) - if len(keys) == 0 { - return nil - } - - if len(keys) != 1 { - lg.Panic( - "unexpected number of keys when getting cluster version from backend", - zap.Int("number-of-key", len(keys)), - ) - } - var d DowngradeInfo - if err := json.Unmarshal(vals[0], &d); err != nil { - lg.Panic("failed to unmarshal downgrade information", zap.Error(err)) - } - - // verify the downgrade info from backend - if d.Enabled { - if _, err := semver.NewVersion(d.TargetVersion); err != nil { - lg.Panic( - "unexpected version format of the downgrade target version from backend", - zap.String("target-version", d.TargetVersion), - ) - } - } - return &d -} - // ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs // with the existing cluster. If the validation succeeds, it assigns the IDs // from the existing cluster to the local cluster. 
@@ -824,32 +766,17 @@ func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *R } } if !ok { - return fmt.Errorf("PeerURLs: no match found for existing member (%v, %v), last resolver error (%v)", ems[i].ID, ems[i].PeerURLs, err) + return fmt.Errorf("PeerURLs: no match found for existing member (%v, %v), last resolver error (%w)", ems[i].ID, ems[i].PeerURLs, err) } } local.members = make(map[types.ID]*Member) for _, m := range lms { local.members[m.ID] = m } + local.buildMembershipMetric() return nil } -// IsValidVersionChange checks the two scenario when version is valid to change: -// 1. Downgrade: cluster version is 1 minor version higher than local version, -// cluster version should change. -// 2. Cluster start: when not all members version are available, cluster version -// is set to MinVersion(3.0), when all members are at higher version, cluster version -// is lower than local version, cluster version should change -func IsValidVersionChange(cv *semver.Version, lv *semver.Version) bool { - cv = &semver.Version{Major: cv.Major, Minor: cv.Minor} - lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} - - if isValidDowngrade(cv, lv) || (cv.Major == lv.Major && cv.LessThan(*lv)) { - return true - } - return false -} - // IsLocalMemberLearner returns if the local member is raft learner func (c *RaftCluster) IsLocalMemberLearner() bool { c.Lock() @@ -866,40 +793,34 @@ func (c *RaftCluster) IsLocalMemberLearner() bool { } // DowngradeInfo returns the downgrade status of the cluster -func (c *RaftCluster) DowngradeInfo() *DowngradeInfo { +func (c *RaftCluster) DowngradeInfo() *serverversion.DowngradeInfo { c.Lock() defer c.Unlock() if c.downgradeInfo == nil { - return &DowngradeInfo{Enabled: false} + return &serverversion.DowngradeInfo{Enabled: false} } - d := &DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion} + d := &serverversion.DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion} return d } -func (c *RaftCluster) SetDowngradeInfo(d *DowngradeInfo, shouldApplyV3 ShouldApplyV3) { +func (c *RaftCluster) SetDowngradeInfo(d *serverversion.DowngradeInfo, shouldApplyV3 ShouldApplyV3) { c.Lock() defer c.Unlock() if c.be != nil && shouldApplyV3 { - mustSaveDowngradeToBackend(c.lg, c.be, d) + c.be.MustSaveDowngradeToBackend(d) } c.downgradeInfo = d - - if d.Enabled { - c.lg.Info( - "The server is ready to downgrade", - zap.String("target-version", d.TargetVersion), - zap.String("server-version", version.Version), - ) - } } // IsMemberExist returns if the member with the given id exists in cluster. func (c *RaftCluster) IsMemberExist(id types.ID) bool { c.Lock() - defer c.Unlock() _, ok := c.members[id] + c.Unlock() + + // gofail: var afterIsMemberExist struct{} return ok } @@ -917,19 +838,79 @@ func (c *RaftCluster) VotingMemberIDs() []types.ID { return ids } -// PushMembershipToStorage is overriding storage information about cluster's -// members, such that they fully reflect internal RaftCluster's storage. -func (c *RaftCluster) PushMembershipToStorage() { - if c.be != nil { - TrimMembershipFromBackend(c.lg, c.be) - for _, m := range c.members { - unsafeSaveMemberToBackend(c.lg, c.be, m) +// buildMembershipMetric sets the knownPeers metric based on the current +// members of the cluster. +func (c *RaftCluster) buildMembershipMetric() { + if c.localID == 0 { + // We don't know our own id yet. 
+ return + } + for p := range c.members { + knownPeers.WithLabelValues(c.localID.String(), p.String()).Set(1) + } + for p := range c.removed { + knownPeers.WithLabelValues(c.localID.String(), p.String()).Set(0) + } +} + +// updateMembershipMetric updates the knownPeers metric to indicate that +// the given peer is now (un)known. +func (c *RaftCluster) updateMembershipMetric(peer types.ID, known bool) { + if c.localID == 0 { + // We don't know our own id yet. + return + } + v := float64(0) + if known { + v = 1 + } + knownPeers.WithLabelValues(c.localID.String(), peer.String()).Set(v) +} + +// ValidateMaxLearnerConfig verifies that the number of existing learner members in the cluster membership, plus an optional N+1 learner +// scale-up, does not exceed maxLearners. +func ValidateMaxLearnerConfig(maxLearners int, members []*Member, scaleUpLearners bool) error { + numLearners := 0 + for _, m := range members { + if m.IsLearner { + numLearners++ } } - if c.v2store != nil { - TrimMembershipFromV2Store(c.lg, c.v2store) - for _, m := range c.members { - mustSaveMemberToStore(c.lg, c.v2store, m) + // Validate config can accommodate scale up. + if scaleUpLearners { + numLearners++ + } + + if numLearners > maxLearners { + return ErrTooManyLearners + } + + return nil +} + +func (c *RaftCluster) Store(store v2store.Store) { + c.Lock() + defer c.Unlock() + + verifyNoMembersInStore(c.lg, store) + + for _, m := range c.members { + mustSaveMemberToStore(c.lg, store, m) + if m.ClientURLs != nil { + mustUpdateMemberAttrInStore(c.lg, store, m) } + c.lg.Info( + "snapshot storing member", + zap.String("id", m.ID.String()), + zap.Strings("peer-urls", m.PeerURLs), + zap.Bool("is-learner", m.IsLearner), + ) + } + for id := range c.removed { + // We do not need to delete the member since the store is empty. + mustAddToRemovedMembersInStore(c.lg, store, id) + } + if c.version != nil { + mustSaveClusterVersionToStore(c.lg, store, c.version) } } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster_opts.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster_opts.go new file mode 100644 index 0000000000000..204fbf04d2c5e --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster_opts.go @@ -0,0 +1,43 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package membership + +const DefaultMaxLearners = 1 + +type ClusterOptions struct { + maxLearners int +} + +// ClusterOption is an option which can be applied to the raft cluster. +type ClusterOption func(*ClusterOptions) + +func newClusterOpts(opts ...ClusterOption) *ClusterOptions { + clOpts := &ClusterOptions{} + clOpts.applyOpts(opts) + return clOpts +} + +func (co *ClusterOptions) applyOpts(opts []ClusterOption) { + for _, opt := range opts { + opt(co) + } +} + +// WithMaxLearners sets the maximum number of learners that can exist in the cluster membership.
+func WithMaxLearners(max int) ClusterOption { + return func(co *ClusterOptions) { + co.maxLearners = max + } +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/errors.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/errors.go index e944d48c69372..ff682971281d2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/errors.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/errors.go @@ -30,6 +30,6 @@ var ( ) func isKeyNotFound(err error) bool { - e, ok := err.(*v2error.Error) - return ok && e.ErrorCode == v2error.EcodeKeyNotFound + var e *v2error.Error + return errors.As(err, &e) && e.ErrorCode == v2error.EcodeKeyNotFound } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/member.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/member.go index 696549d2d261b..b6037bf9d6131 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/member.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/member.go @@ -18,7 +18,6 @@ import ( "crypto/sha1" "encoding/binary" "fmt" - "math/rand" "sort" "strings" "time" @@ -50,18 +49,18 @@ type Member struct { // NewMember creates a Member without an ID and generates one based on the // cluster name, peer URLs, and time. This is used for bootstrapping/adding new member. func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member { - memberId := computeMemberId(peerURLs, clusterName, now) - return newMember(name, peerURLs, memberId, false) + memberID := computeMemberID(peerURLs, clusterName, now) + return newMember(name, peerURLs, memberID, false) } // NewMemberAsLearner creates a learner Member without an ID and generates one based on the // cluster name, peer URLs, and time. This is used for adding new learner member. func NewMemberAsLearner(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member { - memberId := computeMemberId(peerURLs, clusterName, now) - return newMember(name, peerURLs, memberId, true) + memberID := computeMemberID(peerURLs, clusterName, now) + return newMember(name, peerURLs, memberID, true) } -func computeMemberId(peerURLs types.URLs, clusterName string, now *time.Time) types.ID { +func computeMemberID(peerURLs types.URLs, clusterName string, now *time.Time) types.ID { peerURLstrs := peerURLs.StringSlice() sort.Strings(peerURLstrs) joinedPeerUrls := strings.Join(peerURLstrs, "") @@ -76,27 +75,18 @@ func computeMemberId(peerURLs types.URLs, clusterName string, now *time.Time) ty return types.ID(binary.BigEndian.Uint64(hash[:8])) } -func newMember(name string, peerURLs types.URLs, memberId types.ID, isLearner bool) *Member { +func newMember(name string, peerURLs types.URLs, memberID types.ID, isLearner bool) *Member { m := &Member{ RaftAttributes: RaftAttributes{ PeerURLs: peerURLs.StringSlice(), IsLearner: isLearner, }, Attributes: Attributes{Name: name}, - ID: memberId, + ID: memberID, } return m } -// PickPeerURL chooses a random address from a given Member's PeerURLs. -// It will panic if there is no PeerURLs available in Member. 
-func (m *Member) PickPeerURL() string { - if len(m.PeerURLs) == 0 { - panic("member should always have some peer url") - } - return m.PeerURLs[rand.Intn(len(m.PeerURLs))] -} - func (m *Member) Clone() *Member { if m == nil { return nil diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go index b3212bc80cdbd..7eb6fb6b728fe 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go @@ -24,8 +24,32 @@ var ( Help: "Which version is running. 1 for 'cluster_version' label with current cluster version", }, []string{"cluster_version"}) + knownPeers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "network", + Name: "known_peers", + Help: "The current number of known peers.", + }, + []string{"Local", "Remote"}, + ) + isLearner = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "is_learner", + Help: "Whether or not this member is a learner. 1 if is, 0 otherwise.", + }) ) +func setIsLearnerMetric(m *Member) { + if m.IsLearner { + isLearner.Set(1) + } else { + isLearner.Set(0) + } +} + func init() { prometheus.MustRegister(ClusterVersionMetrics) + prometheus.MustRegister(knownPeers) + prometheus.MustRegister(isLearner) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go index fadc818223316..8ada160b3121d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go @@ -1,4 +1,4 @@ -// Copyright 2016 The etcd Authors +// Copyright 2021 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,332 +15,37 @@ package membership import ( - "bytes" - "encoding/json" - "fmt" "path" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" - "github.com/coreos/go-semver/semver" "go.uber.org/zap" -) - -const ( - attributesSuffix = "attributes" - raftAttributesSuffix = "raftAttributes" - - // the prefix for storing membership related information in store provided by store pkg. - storePrefix = "/0" -) -var ( - StoreMembersPrefix = path.Join(storePrefix, "members") - storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members") - errMemberAlreadyExist = fmt.Errorf("member already exists") - errMemberNotFound = fmt.Errorf("member not found") + "go.etcd.io/etcd/client/pkg/v3/types" + "go.etcd.io/etcd/server/v3/etcdserver/version" ) -func unsafeSaveMemberToBackend(lg *zap.Logger, be backend.Backend, m *Member) error { - mkey := backendMemberKey(m.ID) - mvalue, err := json.Marshal(m) - if err != nil { - lg.Panic("failed to marshal member", zap.Error(err)) - } - - tx := be.BatchTx() - tx.LockInsideApply() - defer tx.Unlock() - if unsafeMemberExists(tx, mkey) { - return errMemberAlreadyExist - } - tx.UnsafePut(buckets.Members, mkey, mvalue) - return nil -} - -// TrimClusterFromBackend removes all information about cluster (versions) -// from the v3 backend. 
-func TrimClusterFromBackend(be backend.Backend) error { - tx := be.BatchTx() - tx.LockOutsideApply() - defer tx.Unlock() - tx.UnsafeDeleteBucket(buckets.Cluster) - return nil -} - -func unsafeDeleteMemberFromBackend(be backend.Backend, id types.ID) error { - mkey := backendMemberKey(id) - - tx := be.BatchTx() - tx.LockInsideApply() - defer tx.Unlock() - tx.UnsafePut(buckets.MembersRemoved, mkey, []byte("removed")) - if !unsafeMemberExists(tx, mkey) { - return errMemberNotFound - } - tx.UnsafeDelete(buckets.Members, mkey) - return nil -} - -func unsafeMemberExists(tx backend.ReadTx, mkey []byte) bool { - var found bool - tx.UnsafeForEach(buckets.Members, func(k, v []byte) error { - if bytes.Equal(k, mkey) { - found = true - } - return nil - }) - return found -} - -func readMembersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool, error) { - members := make(map[types.ID]*Member) - removed := make(map[types.ID]bool) - - tx := be.ReadTx() - tx.RLock() - defer tx.RUnlock() - err := tx.UnsafeForEach(buckets.Members, func(k, v []byte) error { - memberId := mustParseMemberIDFromBytes(lg, k) - m := &Member{ID: memberId} - if err := json.Unmarshal(v, &m); err != nil { - return err - } - members[memberId] = m - return nil - }) - if err != nil { - return nil, nil, fmt.Errorf("couldn't read members from backend: %w", err) - } - - err = tx.UnsafeForEach(buckets.MembersRemoved, func(k, v []byte) error { - memberId := mustParseMemberIDFromBytes(lg, k) - removed[memberId] = true - return nil - }) - if err != nil { - return nil, nil, fmt.Errorf("couldn't read members_removed from backend: %w", err) - } - return members, removed, nil -} - -func mustReadMembersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool) { - members, removed, err := readMembersFromBackend(lg, be) - if err != nil { - lg.Panic("couldn't read members from backend", zap.Error(err)) - } - return members, removed -} - -// TrimMembershipFromBackend removes all information about members & -// removed_members from the v3 backend. -func TrimMembershipFromBackend(lg *zap.Logger, be backend.Backend) error { - lg.Info("Trimming membership information from the backend...") - tx := be.BatchTx() - tx.LockOutsideApply() - defer tx.Unlock() - err := tx.UnsafeForEach(buckets.Members, func(k, v []byte) error { - tx.UnsafeDelete(buckets.Members, k) - lg.Debug("Removed member from the backend", - zap.Stringer("member", mustParseMemberIDFromBytes(lg, k))) - return nil - }) - if err != nil { - return err - } - return tx.UnsafeForEach(buckets.MembersRemoved, func(k, v []byte) error { - tx.UnsafeDelete(buckets.MembersRemoved, k) - lg.Debug("Removed removed_member from the backend", - zap.Stringer("member", mustParseMemberIDFromBytes(lg, k))) - return nil - }) -} - -// TrimMembershipFromV2Store removes all information about members & -// removed_members from the v2 store. -func TrimMembershipFromV2Store(lg *zap.Logger, s v2store.Store) error { - members, removed := membersFromStore(lg, s) - - for mID := range members { - _, err := s.Delete(MemberStoreKey(mID), true, true) - if err != nil { - return err - } - } - for mID := range removed { - _, err := s.Delete(RemovedMemberStoreKey(mID), true, true) - if err != nil { - return err - } - } - - return nil -} - -// The field is populated since etcd v3.5. 
-func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) { - ckey := backendClusterVersionKey() - - tx := be.BatchTx() - tx.LockInsideApply() - defer tx.Unlock() - tx.UnsafePut(buckets.Cluster, ckey, []byte(ver.String())) -} - -// The field is populated since etcd v3.5. -func mustSaveDowngradeToBackend(lg *zap.Logger, be backend.Backend, downgrade *DowngradeInfo) { - dkey := backendDowngradeKey() - dvalue, err := json.Marshal(downgrade) - if err != nil { - lg.Panic("failed to marshal downgrade information", zap.Error(err)) - } - tx := be.BatchTx() - tx.LockInsideApply() - defer tx.Unlock() - tx.UnsafePut(buckets.Cluster, dkey, dvalue) -} - -func mustSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) { - err := unsafeSaveMemberToStore(lg, s, m) - if err != nil { - lg.Panic( - "failed to save member to store", - zap.String("member-id", m.ID.String()), - zap.Error(err), - ) - } -} - -func unsafeSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) error { - b, err := json.Marshal(m.RaftAttributes) - if err != nil { - lg.Panic("failed to marshal raftAttributes", zap.Error(err)) - } - p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) - _, err = s.Create(p, false, string(b), false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - return err -} - -func unsafeDeleteMemberFromStore(s v2store.Store, id types.ID) error { - if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil { - return err - } - if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { - return err - } - return nil +type MembershipBackend interface { + ClusterVersionBackend + MemberBackend + DowngradeInfoBackend + MustCreateBackendBuckets() } -func mustUpdateMemberInStore(lg *zap.Logger, s v2store.Store, m *Member) { - b, err := json.Marshal(m.RaftAttributes) - if err != nil { - lg.Panic("failed to marshal raftAttributes", zap.Error(err)) - } - p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) - if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { - lg.Panic( - "failed to update raftAttributes", - zap.String("path", p), - zap.Error(err), - ) - } +type ClusterVersionBackend interface { + ClusterVersionFromBackend() *semver.Version + MustSaveClusterVersionToBackend(version *semver.Version) } -func mustUpdateMemberAttrInStore(lg *zap.Logger, s v2store.Store, m *Member) { - b, err := json.Marshal(m.Attributes) - if err != nil { - lg.Panic("failed to marshal attributes", zap.Error(err)) - } - p := path.Join(MemberStoreKey(m.ID), attributesSuffix) - if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { - lg.Panic( - "failed to update attributes", - zap.String("path", p), - zap.Error(err), - ) - } +type MemberBackend interface { + MustReadMembersFromBackend() (map[types.ID]*Member, map[types.ID]bool) + MustSaveMemberToBackend(*Member) + TrimMembershipFromBackend() error + MustDeleteMemberFromBackend(types.ID) } -func mustSaveClusterVersionToStore(lg *zap.Logger, s v2store.Store, ver *semver.Version) { - if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { - lg.Panic( - "failed to save cluster version to store", - zap.String("path", StoreClusterVersionKey()), - zap.Error(err), - ) - } -} - -// nodeToMember builds member from a key value node. -// the child nodes of the given node MUST be sorted by key. 
-func nodeToMember(lg *zap.Logger, n *v2store.NodeExtern) (*Member, error) { - m := &Member{ID: MustParseMemberIDFromKey(lg, n.Key)} - attrs := make(map[string][]byte) - raftAttrKey := path.Join(n.Key, raftAttributesSuffix) - attrKey := path.Join(n.Key, attributesSuffix) - for _, nn := range n.Nodes { - if nn.Key != raftAttrKey && nn.Key != attrKey { - return nil, fmt.Errorf("unknown key %q", nn.Key) - } - attrs[nn.Key] = []byte(*nn.Value) - } - if data := attrs[raftAttrKey]; data != nil { - if err := json.Unmarshal(data, &m.RaftAttributes); err != nil { - return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err) - } - } else { - return nil, fmt.Errorf("raftAttributes key doesn't exist") - } - if data := attrs[attrKey]; data != nil { - if err := json.Unmarshal(data, &m.Attributes); err != nil { - return m, fmt.Errorf("unmarshal attributes error: %v", err) - } - } - return m, nil -} - -func backendMemberKey(id types.ID) []byte { - return []byte(id.String()) -} - -func backendClusterVersionKey() []byte { - return []byte("clusterVersion") -} - -func backendDowngradeKey() []byte { - return []byte("downgrade") -} - -func mustCreateBackendBuckets(be backend.Backend) { - tx := be.BatchTx() - tx.LockOutsideApply() - defer tx.Unlock() - tx.UnsafeCreateBucket(buckets.Members) - tx.UnsafeCreateBucket(buckets.MembersRemoved) - tx.UnsafeCreateBucket(buckets.Cluster) -} - -func MemberStoreKey(id types.ID) string { - return path.Join(StoreMembersPrefix, id.String()) -} - -func StoreClusterVersionKey() string { - return path.Join(storePrefix, "version") -} - -func MemberAttributesStorePath(id types.ID) string { - return path.Join(MemberStoreKey(id), attributesSuffix) -} - -func mustParseMemberIDFromBytes(lg *zap.Logger, key []byte) types.ID { - id, err := types.IDFromString(string(key)) - if err != nil { - lg.Panic("failed to parse member id from key", zap.Error(err)) - } - return id +type DowngradeInfoBackend interface { + MustSaveDowngradeToBackend(*version.DowngradeInfo) + DowngradeInfoFromBackend() *version.DowngradeInfo } func MustParseMemberIDFromKey(lg *zap.Logger, key string) types.ID { @@ -350,7 +55,3 @@ func MustParseMemberIDFromKey(lg *zap.Logger, key string) types.ID { } return id } - -func RemovedMemberStoreKey(id types.ID) string { - return path.Join(storeRemovedMembersPrefix, id.String()) -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go index 8505c63f367ba..f7a595ac3e1a4 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go @@ -15,9 +15,30 @@ package membership import ( + "encoding/json" + "fmt" + "path" + + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" ) +const ( + // the prefix for storing membership related information in store provided by store pkg. + storePrefix = "/0" + + attributesSuffix = "attributes" + raftAttributesSuffix = "raftAttributes" +) + +var ( + StoreMembersPrefix = path.Join(storePrefix, "members") + storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members") +) + // IsMetaStoreOnly verifies if the given `store` contains only // a meta-information (members, version) that can be recovered from the // backend (storev3) as well as opposed to user-data. 
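Note: the store.go rewrite just above replaces the concrete backend/v2store helpers with the MembershipBackend interface family, making RaftCluster.SetBackend storage-agnostic. A hedged compile-time sketch of that seam follows; stubBackend is hypothetical, while every method signature is copied from the interfaces in this diff:

    package main

    import (
        "github.com/coreos/go-semver/semver"

        "go.etcd.io/etcd/client/pkg/v3/types"
        "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
        "go.etcd.io/etcd/server/v3/etcdserver/version"
    )

    // stubBackend is an in-memory stand-in satisfying ClusterVersionBackend,
    // MemberBackend and DowngradeInfoBackend plus MustCreateBackendBuckets.
    type stubBackend struct{}

    func (stubBackend) ClusterVersionFromBackend() *semver.Version      { return nil }
    func (stubBackend) MustSaveClusterVersionToBackend(*semver.Version) {}
    func (stubBackend) MustReadMembersFromBackend() (map[types.ID]*membership.Member, map[types.ID]bool) {
        return map[types.ID]*membership.Member{}, map[types.ID]bool{}
    }
    func (stubBackend) MustSaveMemberToBackend(*membership.Member)        {}
    func (stubBackend) TrimMembershipFromBackend() error                  { return nil }
    func (stubBackend) MustDeleteMemberFromBackend(types.ID)              {}
    func (stubBackend) MustSaveDowngradeToBackend(*version.DowngradeInfo) {}
    func (stubBackend) DowngradeInfoFromBackend() *version.DowngradeInfo  { return nil }
    func (stubBackend) MustCreateBackendBuckets()                         {}

    // Compile-time check that the stub satisfies the new interface.
    var _ membership.MembershipBackend = stubBackend{}

    func main() {}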
@@ -34,3 +55,146 @@ func IsMetaStoreOnly(store v2store.Store) (bool, error) { return true, nil } + +func verifyNoMembersInStore(lg *zap.Logger, s v2store.Store) { + members, removed := membersFromStore(lg, s) + if len(members) != 0 || len(removed) != 0 { + lg.Panic("store has membership info") + } +} + +func mustSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) { + b, err := json.Marshal(m.RaftAttributes) + if err != nil { + lg.Panic("failed to marshal raftAttributes", zap.Error(err)) + } + p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) + if _, err := s.Create(p, false, string(b), false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { + lg.Panic( + "failed to save member to store", + zap.String("path", p), + zap.Error(err), + ) + } +} + +func mustDeleteMemberFromStore(lg *zap.Logger, s v2store.Store, id types.ID) { + if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil { + lg.Panic( + "failed to delete member from store", + zap.String("path", MemberStoreKey(id)), + zap.Error(err), + ) + } + + mustAddToRemovedMembersInStore(lg, s, id) +} + +func mustAddToRemovedMembersInStore(lg *zap.Logger, s v2store.Store, id types.ID) { + if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { + lg.Panic( + "failed to create removedMember", + zap.String("path", RemovedMemberStoreKey(id)), + zap.Error(err), + ) + } +} + +func mustUpdateMemberInStore(lg *zap.Logger, s v2store.Store, m *Member) { + b, err := json.Marshal(m.RaftAttributes) + if err != nil { + lg.Panic("failed to marshal raftAttributes", zap.Error(err)) + } + p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) + if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { + lg.Panic( + "failed to update raftAttributes", + zap.String("path", p), + zap.Error(err), + ) + } +} + +func mustUpdateMemberAttrInStore(lg *zap.Logger, s v2store.Store, m *Member) { + b, err := json.Marshal(m.Attributes) + if err != nil { + lg.Panic("failed to marshal attributes", zap.Error(err)) + } + p := path.Join(MemberStoreKey(m.ID), attributesSuffix) + if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { + lg.Panic( + "failed to update attributes", + zap.String("path", p), + zap.Error(err), + ) + } +} + +func mustSaveClusterVersionToStore(lg *zap.Logger, s v2store.Store, ver *semver.Version) { + if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { + lg.Panic( + "failed to save cluster version to store", + zap.String("path", StoreClusterVersionKey()), + zap.Error(err), + ) + } +} + +// nodeToMember builds member from a key value node. +// the child nodes of the given node MUST be sorted by key. 
+func nodeToMember(lg *zap.Logger, n *v2store.NodeExtern) (*Member, error) { + m := &Member{ID: MustParseMemberIDFromKey(lg, n.Key)} + attrs := make(map[string][]byte) + raftAttrKey := path.Join(n.Key, raftAttributesSuffix) + attrKey := path.Join(n.Key, attributesSuffix) + for _, nn := range n.Nodes { + if nn.Key != raftAttrKey && nn.Key != attrKey { + return nil, fmt.Errorf("unknown key %q", nn.Key) + } + attrs[nn.Key] = []byte(*nn.Value) + } + if data := attrs[raftAttrKey]; data != nil { + if err := json.Unmarshal(data, &m.RaftAttributes); err != nil { + return nil, fmt.Errorf("unmarshal raftAttributes error: %w", err) + } + } else { + return nil, fmt.Errorf("raftAttributes key doesn't exist") + } + if data := attrs[attrKey]; data != nil { + if err := json.Unmarshal(data, &m.Attributes); err != nil { + return m, fmt.Errorf("unmarshal attributes error: %w", err) + } + } + return m, nil +} + +func StoreClusterVersionKey() string { + return path.Join(storePrefix, "version") +} + +func RemovedMemberStoreKey(id types.ID) string { + return path.Join(storeRemovedMembersPrefix, id.String()) +} + +func MemberStoreKey(id types.ID) string { + return path.Join(StoreMembersPrefix, id.String()) +} + +func MemberAttributesStorePath(id types.ID) string { + return path.Join(MemberStoreKey(id), attributesSuffix) +} + +func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version { + e, err := st.Get(path.Join(storePrefix, "version"), false, false) + if err != nil { + if isKeyNotFound(err) { + return nil + } + lg.Panic( + "failed to get cluster version from store", + zap.String("path", path.Join(storePrefix, "version")), + zap.Error(err), + ) + } + return semver.Must(semver.NewVersion(*e.Node.Value)) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/coder.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/coder.go index cc28249697a20..977442998966a 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/coder.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/coder.go @@ -14,7 +14,7 @@ package rafthttp -import "go.etcd.io/etcd/raft/v3/raftpb" +import "go.etcd.io/raft/v3/raftpb" type encoder interface { // encode encodes the given message to an output stream. diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/doc.go index a9486a8bb664b..c45dc81782549 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/doc.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package rafthttp implements HTTP transportation layer for etcd/raft pkg. +// Package rafthttp implements HTTP transportation layer for raft pkg. 
package rafthttp diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go index f60383f4791c6..56b2f474132ab 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go @@ -18,20 +18,20 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net/http" "path" "strings" "time" + humanize "github.com/dustin/go-humanize" + "go.uber.org/zap" + "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/types" pioutil "go.etcd.io/etcd/pkg/v3/ioutil" - "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - - humanize "github.com/dustin/go-humanize" - "go.uber.org/zap" + "go.etcd.io/raft/v3/raftpb" ) const ( @@ -93,7 +93,7 @@ func newPipelineHandler(t *Transport, r Raft, cid types.ID) http.Handler { } func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { + if r.Method != http.MethodPost { w.Header().Set("Allow", "POST") http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) return @@ -111,7 +111,7 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Limit the data size that could be read from the request body, which ensures that read from // connection will not time out accidentally due to possible blocking in underlying implementation. limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte) - b, err := ioutil.ReadAll(limitedr) + b, err := io.ReadAll(limitedr) if err != nil { h.lg.Warn( "failed to read Raft message", @@ -138,9 +138,10 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(len(b))) if err := h.r.Process(context.TODO(), m); err != nil { - switch v := err.(type) { - case writerToResponse: - v.WriteTo(w) + var writerErr writerToResponse + switch { + case errors.As(err, &writerErr): + writerErr.WriteTo(w) default: h.lg.Warn( "failed to process Raft message", @@ -199,7 +200,7 @@ const unknownSnapshotSender = "UNKNOWN_SNAPSHOT_SENDER" func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { start := time.Now() - if r.Method != "POST" { + if r.Method != http.MethodPost { w.Header().Set("Allow", "POST") http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc() @@ -294,11 +295,12 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ) if err := h.r.Process(context.TODO(), m); err != nil { - switch v := err.(type) { + var writerErr writerToResponse + switch { // Process may return writerToResponse error when doing some // additional checks before calling raft.Node.Step. 
- case writerToResponse: - v.WriteTo(w) + case errors.As(err, &writerErr): + writerErr.WriteTo(w) default: msg := fmt.Sprintf("failed to process raft message (%v)", err) h.lg.Warn( @@ -346,7 +348,7 @@ func newStreamHandler(t *Transport, pg peerGetter, r Raft, id, cid types.ID) htt } func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { + if r.Method != http.MethodGet { w.Header().Set("Allow", "GET") http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) return diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msg_codec.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msg_codec.go index 7db880baa24fb..5444c01f8fdc7 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msg_codec.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msg_codec.go @@ -20,7 +20,7 @@ import ( "io" "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/raft/v3/raftpb" + "go.etcd.io/raft/v3/raftpb" ) // messageEncoder is a encoder that can encode all kinds of messages. diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msgappv2_codec.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msgappv2_codec.go index 9a7f4a1770cc1..59425aeea69d2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msgappv2_codec.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msgappv2_codec.go @@ -22,8 +22,8 @@ import ( "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/raft/v3/raftpb" stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" + "go.etcd.io/raft/v3/raftpb" ) const ( diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go index 444d6bde94be9..c1e6ba1270470 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go @@ -19,14 +19,14 @@ import ( "sync" "time" + "go.uber.org/zap" + "golang.org/x/time/rate" + "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - - "go.uber.org/zap" - "golang.org/x/time/rate" + "go.etcd.io/raft/v3" + "go.etcd.io/raft/v3/raftpb" ) const ( @@ -250,30 +250,16 @@ func (p *peer) send(m raftpb.Message) { if isMsgSnap(m) { p.r.ReportSnapshot(m.To, raft.SnapshotFailure) } - if p.status.isActive() { - if p.lg != nil { - p.lg.Warn( - "dropped internal Raft message since sending buffer is full (overloaded network)", - zap.String("message-type", m.Type.String()), - zap.String("local-member-id", p.localID.String()), - zap.String("from", types.ID(m.From).String()), - zap.String("remote-peer-id", p.id.String()), - zap.String("remote-peer-name", name), - zap.Bool("remote-peer-active", p.status.isActive()), - ) - } - } else { - if p.lg != nil { - p.lg.Warn( - "dropped internal Raft message since sending buffer is full (overloaded network)", - zap.String("message-type", m.Type.String()), - zap.String("local-member-id", p.localID.String()), - zap.String("from", types.ID(m.From).String()), - zap.String("remote-peer-id", p.id.String()), - zap.String("remote-peer-name", name), - zap.Bool("remote-peer-active", p.status.isActive()), - ) - } + if p.lg != nil { + p.lg.Warn( + "dropped internal Raft message since sending buffer is full", + zap.String("message-type", m.Type.String()), + 
zap.String("local-member-id", p.localID.String()), + zap.String("from", types.ID(m.From).String()), + zap.String("remote-peer-id", p.id.String()), + zap.String("remote-peer-name", name), + zap.Bool("remote-peer-active", p.status.isActive()), + ) } sentFailures.WithLabelValues(types.ID(m.To).String()).Inc() } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer_status.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer_status.go index cad19b2fbce8a..01c3ebacfadf0 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer_status.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer_status.go @@ -20,9 +20,9 @@ import ( "sync" "time" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/types" ) type failureType struct { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go index de3b459118381..0790b58d03be1 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go @@ -18,18 +18,18 @@ import ( "bytes" "context" "errors" - "io/ioutil" + "io" "runtime" "sync" "time" + "go.uber.org/zap" + "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/raft/v3/raftpb" stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - - "go.uber.org/zap" + "go.etcd.io/raft/v3" + "go.etcd.io/raft/v3/raftpb" ) const ( @@ -103,7 +103,7 @@ func (p *pipeline) handle() { if err != nil { p.status.deactivate(failureType{source: pipelineMsg, action: "write"}, err.Error()) - if m.Type == raftpb.MsgApp && p.followerStats != nil { + if isMsgApp(m) && p.followerStats != nil { p.followerStats.Fail() } p.raft.ReportUnreachable(m.To) @@ -115,7 +115,7 @@ func (p *pipeline) handle() { } p.status.activate() - if m.Type == raftpb.MsgApp && p.followerStats != nil { + if isMsgApp(m) && p.followerStats != nil { p.followerStats.Succ(end.Sub(start)) } if isMsgSnap(m) { @@ -154,7 +154,7 @@ func (p *pipeline) post(data []byte) (err error) { return err } defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { p.picker.unreachable(u) return err @@ -165,7 +165,7 @@ func (p *pipeline) post(data []byte) (err error) { p.picker.unreachable(u) // errMemberRemoved is a critical error since a removed member should // always be stopped. So we use reportCriticalError to report it to errorc. 
- if err == errMemberRemoved { + if errors.Is(err, errMemberRemoved) { reportCriticalError(err, p.errorc) } return err diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/remote.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/remote.go index eddb3f4da1b4d..3eb2f386806fc 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/remote.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/remote.go @@ -15,10 +15,10 @@ package rafthttp import ( - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/raft/v3/raftpb" - "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/types" + "go.etcd.io/raft/v3/raftpb" ) type remote struct { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go index 84eb56bd0bf7b..07971cbbddcc7 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go @@ -17,19 +17,19 @@ package rafthttp import ( "bytes" "context" + "errors" "io" - "io/ioutil" "net/http" "time" + "github.com/dustin/go-humanize" + "go.uber.org/zap" + "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/httputil" pioutil "go.etcd.io/etcd/pkg/v3/ioutil" - "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - - "github.com/dustin/go-humanize" - "go.uber.org/zap" + "go.etcd.io/raft/v3" ) var ( @@ -111,7 +111,7 @@ func (s *snapshotSender) send(merged snap.Message) { // errMemberRemoved is a critical error since a removed member should // always be stopped. So we use reportCriticalError to report it to errorc. - if err == errMemberRemoved { + if errors.Is(err, errMemberRemoved) { reportCriticalError(err, s.errorc) } @@ -169,7 +169,7 @@ func (s *snapshotSender) post(req *http.Request) (err error) { // prevents from reading the body forever when the other side dies right after // successfully receives the request body. 
time.AfterFunc(snapResponseReadTimeout, func() { httputil.GracefulClose(resp) }) - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) result <- responseAndError{resp, body, err} }() diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go index 1b180fca3f749..e823c4770f462 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go @@ -16,25 +16,25 @@ package rafthttp import ( "context" + "errors" "fmt" "io" - "io/ioutil" "net/http" "path" "strings" "sync" "time" + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" + "golang.org/x/time/rate" + "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/httputil" - "go.etcd.io/etcd/raft/v3/raftpb" stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" - "golang.org/x/time/rate" + "go.etcd.io/raft/v3/raftpb" ) const ( @@ -59,6 +59,7 @@ var ( "3.3.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.4.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.5.0": {streamTypeMsgAppV2, streamTypeMessage}, + "3.6.0": {streamTypeMsgAppV2, streamTypeMessage}, } ) @@ -403,7 +404,7 @@ func (cr *streamReader) run() { for { rc, err := cr.dial(t) if err != nil { - if err != errUnsupportedStreamType { + if !errors.Is(err, errUnsupportedStreamType) { cr.status.deactivate(failureType{source: t.String(), action: "dial"}, err.Error()) } } else { @@ -428,7 +429,7 @@ func (cr *streamReader) run() { } switch { // all data is read out - case err == io.EOF: + case errors.Is(err, io.EOF): // connection is closed by the remote case transport.IsClosedConnError(err): default: @@ -574,10 +575,10 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { zap.String("address", uu.String()), ) } - req, err := http.NewRequest("GET", uu.String(), nil) + req, err := http.NewRequest(http.MethodGet, uu.String(), nil) if err != nil { cr.picker.unreachable(u) - return nil, fmt.Errorf("failed to make http request to %v (%v)", u, err) + return nil, fmt.Errorf("failed to make http request to %v (%w)", u, err) } req.Header.Set("X-Server-From", cr.tr.ID.String()) req.Header.Set("X-Server-Version", version.Version) @@ -628,7 +629,7 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID) case http.StatusPreconditionFailed: - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { cr.picker.unreachable(u) return nil, err diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go index a1969549a92c3..b376d578b6c48 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go @@ -20,16 +20,16 @@ import ( "sync" "time" + "github.com/xiang90/probing" + "go.uber.org/zap" + "golang.org/x/time/rate" + "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - - "github.com/xiang90/probing" - "go.uber.org/zap" - "golang.org/x/time/rate" + 
"go.etcd.io/raft/v3" + "go.etcd.io/raft/v3/raftpb" ) type Raft interface { @@ -110,7 +110,7 @@ type Transport struct { Raft Raft // raft state machine, to which the Transport forwards received messages and reports status Snapshotter *snap.Snapshotter ServerStats *stats.ServerStats // used to record general transportation statistics - // LeaderStats is used to record transportation statistics with followers when + // LeaderStats records transportation statistics with followers when // performing as leader in raft protocol LeaderStats *stats.LeaderStats // ErrorC is used to report detected critical errors, e.g., @@ -186,7 +186,7 @@ func (t *Transport) Send(msgs []raftpb.Message) { t.mu.RUnlock() if pok { - if m.Type == raftpb.MsgApp { + if isMsgApp(m) { t.ServerStats.SendAppendReq(m.Size()) } p.send(m) @@ -339,24 +339,30 @@ func (t *Transport) RemoveAllPeers() { // the caller of this function must have the peers mutex. func (t *Transport) removePeer(id types.ID) { - if peer, ok := t.peers[id]; ok { + // etcd may remove a member again on startup due to WAL files replaying. + peer, ok := t.peers[id] + if ok { peer.stop() - } else { - if t.Logger != nil { - t.Logger.Panic("unexpected removal of unknown remote peer", zap.String("remote-peer-id", id.String())) - } + delete(t.peers, id) + delete(t.LeaderStats.Followers, id.String()) + t.pipelineProber.Remove(id.String()) + t.streamProber.Remove(id.String()) } - delete(t.peers, id) - delete(t.LeaderStats.Followers, id.String()) - t.pipelineProber.Remove(id.String()) - t.streamProber.Remove(id.String()) if t.Logger != nil { - t.Logger.Info( - "removed remote peer", - zap.String("local-member-id", t.ID.String()), - zap.String("removed-remote-peer-id", id.String()), - ) + if ok { + t.Logger.Info( + "removed remote peer", + zap.String("local-member-id", t.ID.String()), + zap.String("removed-remote-peer-id", id.String()), + ) + } else { + t.Logger.Warn( + "skipped removing already removed peer", + zap.String("local-member-id", t.ID.String()), + zap.String("removed-remote-peer-id", id.String()), + ) + } } } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/util.go index 6d0e6bdd6b09e..5fe0728218b92 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/util.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/util.go @@ -23,12 +23,12 @@ import ( "strings" "time" + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" + "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/types" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" ) var ( @@ -64,7 +64,7 @@ func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) func createPostRequest(lg *zap.Logger, u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request { uu := u uu.Path = path - req, err := http.NewRequest("POST", uu.String(), body) + req, err := http.NewRequest(http.MethodPost, uu.String(), body) if err != nil { if lg != nil { lg.Panic("unexpected new request error", zap.Error(err)) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go index af653e1d5fe3d..e8add5e32758f 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go @@ -18,15 +18,14 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" 
"path/filepath" "time" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - humanize "github.com/dustin/go-humanize" "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/fileutil" ) var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist") @@ -36,7 +35,7 @@ var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist") func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { start := time.Now() - f, err := ioutil.TempFile(s.dir, "tmp") + f, err := os.CreateTemp(s.dir, "tmp") if err != nil { return 0, err } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/message.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/message.go index 523b52b85c604..2b4090c981d13 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/message.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/message.go @@ -18,7 +18,7 @@ import ( "io" "go.etcd.io/etcd/pkg/v3/ioutil" - "go.etcd.io/etcd/raft/v3/raftpb" + "go.etcd.io/raft/v3/raftpb" ) // Message is a struct that contains a raft Message and a ReadCloser. The type diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.pb.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.pb.go index 6fd2b9c0089e3..ff9d39c7ec25e 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.pb.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.pb.go @@ -72,15 +72,18 @@ func init() { func init() { proto.RegisterFile("snap.proto", fileDescriptor_f2e3c045ebf84d00) } var fileDescriptor_f2e3c045ebf84d00 = []byte{ - // 126 bytes of a gzipped FileDescriptorProto + // 164 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24, - 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1, + 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e, - 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00, + 0x21, 0xca, 0x24, 0x3d, 0x5f, 0x2f, 0xb5, 0x24, 0x39, 0x45, 0x2f, 0x33, 0x5f, 0x1f, 0x44, 0xeb, + 0x17, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0xe9, 0x97, 0x19, 0x83, 0xb9, 0x50, 0x5e, 0x62, 0x41, 0xa6, + 0x3e, 0xc8, 0x52, 0x7d, 0x88, 0xcb, 0x00, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x65, 0xd9, 0x03, + 0xae, 0x00, 0x00, 0x00, } func (m *Snapshot) Marshal() (dAtA []byte, err error) { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.proto b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.proto index cd3d21d0ee12e..0a74744e4b2d4 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.proto +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.proto @@ -3,6 +3,8 @@ package snappb; import "gogoproto/gogo.proto"; +option go_package = "go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb"; + option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git 
a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go index 52cc0ae26704f..2ae2b00234101 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "hash/crc32" - "io/ioutil" "os" "path/filepath" "sort" @@ -26,14 +25,15 @@ import ( "strings" "time" + "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/verify" pioutil "go.etcd.io/etcd/pkg/v3/ioutil" "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb" - "go.etcd.io/etcd/server/v3/wal/walpb" - - "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" + "go.etcd.io/raft/v3" + "go.etcd.io/raft/v3/raftpb" ) const snapSuffix = ".snap" @@ -95,7 +95,7 @@ func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error { s.lg.Warn("failed to write a snap file", zap.String("path", spath), zap.Error(err)) rerr := os.Remove(spath) if rerr != nil { - s.lg.Warn("failed to remove a broken snap file", zap.String("path", spath), zap.Error(err)) + s.lg.Warn("failed to remove a broken snap file", zap.String("path", spath), zap.Error(rerr)) } return err } @@ -130,29 +130,23 @@ func (s *Snapshotter) loadMatching(matchFn func(*raftpb.Snapshot) bool) (*raftpb } var snap *raftpb.Snapshot for _, name := range names { - if snap, err = loadSnap(s.lg, s.dir, name); err == nil && matchFn(snap) { + if snap, err = s.loadSnap(name); err == nil && matchFn(snap) { return snap, nil } } return nil, ErrNoSnapshot } -func loadSnap(lg *zap.Logger, dir, name string) (*raftpb.Snapshot, error) { - fpath := filepath.Join(dir, name) - snap, err := Read(lg, fpath) +func (s *Snapshotter) loadSnap(name string) (*raftpb.Snapshot, error) { + fpath := filepath.Join(s.dir, name) + snap, err := Read(s.lg, fpath) if err != nil { brokenPath := fpath + ".broken" - if lg != nil { - lg.Warn("failed to read a snap file", zap.String("path", fpath), zap.Error(err)) - } + s.lg.Warn("failed to read a snap file", zap.String("path", fpath), zap.Error(err)) if rerr := os.Rename(fpath, brokenPath); rerr != nil { - if lg != nil { - lg.Warn("failed to rename a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath), zap.Error(rerr)) - } + s.lg.Warn("failed to rename a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath), zap.Error(rerr)) } else { - if lg != nil { - lg.Warn("renamed to a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath)) - } + s.lg.Warn("renamed to a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath)) } } return snap, err @@ -160,53 +154,42 @@ func loadSnap(lg *zap.Logger, dir, name string) (*raftpb.Snapshot, error) { // Read reads the snapshot named by snapname and returns the snapshot. 
func Read(lg *zap.Logger, snapname string) (*raftpb.Snapshot, error) { - b, err := ioutil.ReadFile(snapname) + verify.Assert(lg != nil, "the logger should not be nil") + b, err := os.ReadFile(snapname) if err != nil { - if lg != nil { - lg.Warn("failed to read a snap file", zap.String("path", snapname), zap.Error(err)) - } + lg.Warn("failed to read a snap file", zap.String("path", snapname), zap.Error(err)) return nil, err } if len(b) == 0 { - if lg != nil { - lg.Warn("failed to read empty snapshot file", zap.String("path", snapname)) - } + lg.Warn("failed to read empty snapshot file", zap.String("path", snapname)) return nil, ErrEmptySnapshot } var serializedSnap snappb.Snapshot if err = serializedSnap.Unmarshal(b); err != nil { - if lg != nil { - lg.Warn("failed to unmarshal snappb.Snapshot", zap.String("path", snapname), zap.Error(err)) - } + lg.Warn("failed to unmarshal snappb.Snapshot", zap.String("path", snapname), zap.Error(err)) return nil, err } if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 { - if lg != nil { - lg.Warn("failed to read empty snapshot data", zap.String("path", snapname)) - } + lg.Warn("failed to read empty snapshot data", zap.String("path", snapname)) return nil, ErrEmptySnapshot } crc := crc32.Update(0, crcTable, serializedSnap.Data) if crc != serializedSnap.Crc { - if lg != nil { - lg.Warn("snap file is corrupt", - zap.String("path", snapname), - zap.Uint32("prev-crc", serializedSnap.Crc), - zap.Uint32("new-crc", crc), - ) - } + lg.Warn("snap file is corrupt", + zap.String("path", snapname), + zap.Uint32("prev-crc", serializedSnap.Crc), + zap.Uint32("new-crc", crc), + ) return nil, ErrCRCMismatch } var snap raftpb.Snapshot if err = snap.Unmarshal(serializedSnap.Data); err != nil { - if lg != nil { - lg.Warn("failed to unmarshal raftpb.Snapshot", zap.String("path", snapname), zap.Error(err)) - } + lg.Warn("failed to unmarshal raftpb.Snapshot", zap.String("path", snapname), zap.Error(err)) return nil, err } return &snap, nil @@ -228,7 +211,7 @@ func (s *Snapshotter) snapNames() ([]string, error) { if err != nil { return nil, err } - snaps := checkSuffix(s.lg, filenames) + snaps := s.checkSuffix(filenames) if len(snaps) == 0 { return nil, ErrNoSnapshot } @@ -236,18 +219,16 @@ func (s *Snapshotter) snapNames() ([]string, error) { return snaps, nil } -func checkSuffix(lg *zap.Logger, names []string) []string { - snaps := []string{} +func (s *Snapshotter) checkSuffix(names []string) []string { + var snaps []string for i := range names { if strings.HasSuffix(names[i], snapSuffix) { snaps = append(snaps, names[i]) } else { // If we find a file which is not a snapshot then check if it's - // a vaild file. If not throw out a warning. + // a valid file. If not throw out a warning. 
if _, ok := validFiles[names[i]]; !ok { - if lg != nil { - lg.Warn("found unexpected non-snap file; skipping", zap.String("path", names[i])) - } + s.lg.Warn("found unexpected non-snap file; skipping", zap.String("path", names[i])) } } } @@ -262,7 +243,7 @@ func (s *Snapshotter) cleanupSnapdir(filenames []string) (names []string, err er if strings.HasPrefix(filename, "db.tmp") { s.lg.Info("found orphaned defragmentation file; deleting", zap.String("path", filename)) if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) { - return names, fmt.Errorf("failed to remove orphaned .snap.db file %s: %v", filename, rmErr) + return names, fmt.Errorf("failed to remove orphaned .snap.db file %s: %w", filename, rmErr) } } else { names = append(names, filename) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth.go deleted file mode 100644 index e76ee8b2a1707..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth.go +++ /dev/null @@ -1,670 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v2auth implements etcd authentication. -package v2auth - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "path" - "reflect" - "sort" - "strings" - "time" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - - "go.uber.org/zap" - "golang.org/x/crypto/bcrypt" -) - -const ( - // StorePermsPrefix is the internal prefix of the storage layer dedicated to storing user data. - StorePermsPrefix = "/2" - - // RootRoleName is the name of the ROOT role, with privileges to manage the cluster. - RootRoleName = "root" - - // GuestRoleName is the name of the role that defines the privileges of an unauthenticated user. 
- GuestRoleName = "guest" -) - -var rootRole = Role{ - Role: RootRoleName, - Permissions: Permissions{ - KV: RWPermission{ - Read: []string{"/*"}, - Write: []string{"/*"}, - }, - }, -} - -var guestRole = Role{ - Role: GuestRoleName, - Permissions: Permissions{ - KV: RWPermission{ - Read: []string{"/*"}, - Write: []string{"/*"}, - }, - }, -} - -type doer interface { - Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error) -} - -type Store interface { - AllUsers() ([]string, error) - GetUser(name string) (User, error) - CreateOrUpdateUser(user User) (out User, created bool, err error) - CreateUser(user User) (User, error) - DeleteUser(name string) error - UpdateUser(user User) (User, error) - AllRoles() ([]string, error) - GetRole(name string) (Role, error) - CreateRole(role Role) error - DeleteRole(name string) error - UpdateRole(role Role) (Role, error) - AuthEnabled() bool - EnableAuth() error - DisableAuth() error - PasswordStore -} - -type PasswordStore interface { - CheckPassword(user User, password string) bool - HashPassword(password string) (string, error) -} - -type store struct { - lg *zap.Logger - server doer - timeout time.Duration - ensuredOnce bool - - PasswordStore -} - -type User struct { - User string `json:"user"` - Password string `json:"password,omitempty"` - Roles []string `json:"roles"` - Grant []string `json:"grant,omitempty"` - Revoke []string `json:"revoke,omitempty"` -} - -type Role struct { - Role string `json:"role"` - Permissions Permissions `json:"permissions"` - Grant *Permissions `json:"grant,omitempty"` - Revoke *Permissions `json:"revoke,omitempty"` -} - -type Permissions struct { - KV RWPermission `json:"kv"` -} - -func (p *Permissions) IsEmpty() bool { - return p == nil || (len(p.KV.Read) == 0 && len(p.KV.Write) == 0) -} - -type RWPermission struct { - Read []string `json:"read"` - Write []string `json:"write"` -} - -type Error struct { - Status int - Errmsg string -} - -func (ae Error) Error() string { return ae.Errmsg } -func (ae Error) HTTPStatus() int { return ae.Status } - -func authErr(hs int, s string, v ...interface{}) Error { - return Error{Status: hs, Errmsg: fmt.Sprintf("auth: "+s, v...)} -} - -func NewStore(lg *zap.Logger, server doer, timeout time.Duration) Store { - if lg == nil { - lg = zap.NewNop() - } - s := &store{ - lg: lg, - server: server, - timeout: timeout, - PasswordStore: passwordStore{}, - } - return s -} - -// passwordStore implements PasswordStore using bcrypt to hash user passwords -type passwordStore struct{} - -func (passwordStore) CheckPassword(user User, password string) bool { - err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)) - return err == nil -} - -func (passwordStore) HashPassword(password string) (string, error) { - hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) - return string(hash), err -} - -func (s *store) AllUsers() ([]string, error) { - resp, err := s.requestResource("/users/", false) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeKeyNotFound { - return []string{}, nil - } - } - return nil, err - } - var nodes []string - for _, n := range resp.Event.Node.Nodes { - _, user := path.Split(n.Key) - nodes = append(nodes, user) - } - sort.Strings(nodes) - return nodes, nil -} - -func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) } - -// CreateOrUpdateUser should be only used for creating the new user or when you are not -// sure if it is a create or update. 
(When only password is passed in, we are not sure -// if it is a update or create) -func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) { - _, err = s.getUser(user.User, true) - if err == nil { - out, err = s.UpdateUser(user) - return out, false, err - } - u, err := s.CreateUser(user) - return u, true, err -} - -func (s *store) CreateUser(user User) (User, error) { - // Attach root role to root user. - if user.User == "root" { - user = attachRootRole(user) - } - u, err := s.createUserInternal(user) - if err == nil { - s.lg.Info("created a user", zap.String("user-name", user.User)) - } - return u, err -} - -func (s *store) createUserInternal(user User) (User, error) { - if user.Password == "" { - return user, authErr(http.StatusBadRequest, "Cannot create user %s with an empty password", user.User) - } - hash, err := s.HashPassword(user.Password) - if err != nil { - return user, err - } - user.Password = hash - - _, err = s.createResource("/users/"+user.User, user) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeNodeExist { - return user, authErr(http.StatusConflict, "User %s already exists.", user.User) - } - } - } - return user, err -} - -func (s *store) DeleteUser(name string) error { - if s.AuthEnabled() && name == "root" { - return authErr(http.StatusForbidden, "Cannot delete root user while auth is enabled.") - } - err := s.deleteResource("/users/" + name) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeKeyNotFound { - return authErr(http.StatusNotFound, "User %s does not exist", name) - } - } - return err - } - s.lg.Info("deleted a user", zap.String("user-name", name)) - return nil -} - -func (s *store) UpdateUser(user User) (User, error) { - old, err := s.getUser(user.User, true) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeKeyNotFound { - return user, authErr(http.StatusNotFound, "User %s doesn't exist.", user.User) - } - } - return old, err - } - - newUser, err := old.merge(s.lg, user, s.PasswordStore) - if err != nil { - return old, err - } - if reflect.DeepEqual(old, newUser) { - return old, authErr(http.StatusBadRequest, "User not updated. 
Use grant/revoke/password to update the user.") - } - _, err = s.updateResource("/users/"+user.User, newUser) - if err == nil { - s.lg.Info("updated a user", zap.String("user-name", user.User)) - } - return newUser, err -} - -func (s *store) AllRoles() ([]string, error) { - nodes := []string{RootRoleName} - resp, err := s.requestResource("/roles/", false) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeKeyNotFound { - return nodes, nil - } - } - return nil, err - } - for _, n := range resp.Event.Node.Nodes { - _, role := path.Split(n.Key) - nodes = append(nodes, role) - } - sort.Strings(nodes) - return nodes, nil -} - -func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) } - -func (s *store) CreateRole(role Role) error { - if role.Role == RootRoleName { - return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role) - } - _, err := s.createResource("/roles/"+role.Role, role) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeNodeExist { - return authErr(http.StatusConflict, "Role %s already exists.", role.Role) - } - } - } - if err == nil { - s.lg.Info("created a new role", zap.String("role-name", role.Role)) - } - return err -} - -func (s *store) DeleteRole(name string) error { - if name == RootRoleName { - return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", name) - } - err := s.deleteResource("/roles/" + name) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeKeyNotFound { - return authErr(http.StatusNotFound, "Role %s doesn't exist.", name) - } - } - } - if err == nil { - s.lg.Info("delete a new role", zap.String("role-name", name)) - } - return err -} - -func (s *store) UpdateRole(role Role) (Role, error) { - if role.Role == RootRoleName { - return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role) - } - old, err := s.getRole(role.Role, true) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeKeyNotFound { - return role, authErr(http.StatusNotFound, "Role %s doesn't exist.", role.Role) - } - } - return old, err - } - newRole, err := old.merge(s.lg, role) - if err != nil { - return old, err - } - if reflect.DeepEqual(old, newRole) { - return old, authErr(http.StatusBadRequest, "Role not updated. 
Use grant/revoke to update the role.") - } - _, err = s.updateResource("/roles/"+role.Role, newRole) - if err == nil { - s.lg.Info("updated a new role", zap.String("role-name", role.Role)) - } - return newRole, err -} - -func (s *store) AuthEnabled() bool { - return s.detectAuth() -} - -func (s *store) EnableAuth() error { - if s.AuthEnabled() { - return authErr(http.StatusConflict, "already enabled") - } - - if _, err := s.getUser("root", true); err != nil { - return authErr(http.StatusConflict, "No root user available, please create one") - } - if _, err := s.getRole(GuestRoleName, true); err != nil { - s.lg.Info( - "no guest role access found; creating default", - zap.String("role-name", GuestRoleName), - ) - if err := s.CreateRole(guestRole); err != nil { - s.lg.Warn( - "failed to create a guest role; aborting auth enable", - zap.String("role-name", GuestRoleName), - zap.Error(err), - ) - return err - } - } - - if err := s.enableAuth(); err != nil { - s.lg.Warn("failed to enable auth", zap.Error(err)) - return err - } - - s.lg.Info("enabled auth") - return nil -} - -func (s *store) DisableAuth() error { - if !s.AuthEnabled() { - return authErr(http.StatusConflict, "already disabled") - } - - err := s.disableAuth() - if err == nil { - s.lg.Info("disabled auth") - } else { - s.lg.Warn("failed to disable auth", zap.Error(err)) - } - return err -} - -// merge applies the properties of the passed-in User to the User on which it -// is called and returns a new User with these modifications applied. Think of -// all Users as immutable sets of data. Merge allows you to perform the set -// operations (desired grants and revokes) atomically -func (ou User) merge(lg *zap.Logger, nu User, s PasswordStore) (User, error) { - var out User - if ou.User != nu.User { - return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User) - } - out.User = ou.User - if nu.Password != "" { - hash, err := s.HashPassword(nu.Password) - if err != nil { - return ou, err - } - out.Password = hash - } else { - out.Password = ou.Password - } - currentRoles := types.NewUnsafeSet(ou.Roles...) - for _, g := range nu.Grant { - if currentRoles.Contains(g) { - lg.Warn( - "attempted to grant a duplicate role for a user", - zap.String("user-name", nu.User), - zap.String("role-name", g), - ) - return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User)) - } - currentRoles.Add(g) - } - for _, r := range nu.Revoke { - if !currentRoles.Contains(r) { - lg.Warn( - "attempted to revoke a ungranted role for a user", - zap.String("user-name", nu.User), - zap.String("role-name", r), - ) - return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User)) - } - currentRoles.Remove(r) - } - out.Roles = currentRoles.Values() - sort.Strings(out.Roles) - return out, nil -} - -// merge for a role works the same as User above -- atomic Role application to -// each of the substructures. 
-func (r Role) merge(lg *zap.Logger, n Role) (Role, error) { - var out Role - var err error - if r.Role != n.Role { - return out, authErr(http.StatusConflict, "Merging role with conflicting names: %s %s", r.Role, n.Role) - } - out.Role = r.Role - out.Permissions, err = r.Permissions.Grant(n.Grant) - if err != nil { - return out, err - } - out.Permissions, err = out.Permissions.Revoke(lg, n.Revoke) - return out, err -} - -func (r Role) HasKeyAccess(key string, write bool) bool { - if r.Role == RootRoleName { - return true - } - return r.Permissions.KV.HasAccess(key, write) -} - -func (r Role) HasRecursiveAccess(key string, write bool) bool { - if r.Role == RootRoleName { - return true - } - return r.Permissions.KV.HasRecursiveAccess(key, write) -} - -// Grant adds a set of permissions to the permission object on which it is called, -// returning a new permission object. -func (p Permissions) Grant(n *Permissions) (Permissions, error) { - var out Permissions - var err error - if n == nil { - return p, nil - } - out.KV, err = p.KV.Grant(n.KV) - return out, err -} - -// Revoke removes a set of permissions to the permission object on which it is called, -// returning a new permission object. -func (p Permissions) Revoke(lg *zap.Logger, n *Permissions) (Permissions, error) { - var out Permissions - var err error - if n == nil { - return p, nil - } - out.KV, err = p.KV.Revoke(lg, n.KV) - return out, err -} - -// Grant adds a set of permissions to the permission object on which it is called, -// returning a new permission object. -func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) { - var out RWPermission - currentRead := types.NewUnsafeSet(rw.Read...) - for _, r := range n.Read { - if currentRead.Contains(r) { - return out, authErr(http.StatusConflict, "Granting duplicate read permission %s", r) - } - currentRead.Add(r) - } - currentWrite := types.NewUnsafeSet(rw.Write...) - for _, w := range n.Write { - if currentWrite.Contains(w) { - return out, authErr(http.StatusConflict, "Granting duplicate write permission %s", w) - } - currentWrite.Add(w) - } - out.Read = currentRead.Values() - out.Write = currentWrite.Values() - sort.Strings(out.Read) - sort.Strings(out.Write) - return out, nil -} - -// Revoke removes a set of permissions to the permission object on which it is called, -// returning a new permission object. -func (rw RWPermission) Revoke(lg *zap.Logger, n RWPermission) (RWPermission, error) { - var out RWPermission - currentRead := types.NewUnsafeSet(rw.Read...) - for _, r := range n.Read { - if !currentRead.Contains(r) { - lg.Info( - "revoking ungranted read permission", - zap.String("read-permission", r), - ) - continue - } - currentRead.Remove(r) - } - currentWrite := types.NewUnsafeSet(rw.Write...) 
- for _, w := range n.Write { - if !currentWrite.Contains(w) { - lg.Info( - "revoking ungranted write permission", - zap.String("write-permission", w), - ) - continue - } - currentWrite.Remove(w) - } - out.Read = currentRead.Values() - out.Write = currentWrite.Values() - sort.Strings(out.Read) - sort.Strings(out.Write) - return out, nil -} - -func (rw RWPermission) HasAccess(key string, write bool) bool { - var list []string - if write { - list = rw.Write - } else { - list = rw.Read - } - for _, pat := range list { - match, err := simpleMatch(pat, key) - if err == nil && match { - return true - } - } - return false -} - -func (rw RWPermission) HasRecursiveAccess(key string, write bool) bool { - list := rw.Read - if write { - list = rw.Write - } - for _, pat := range list { - match, err := prefixMatch(pat, key) - if err == nil && match { - return true - } - } - return false -} - -func simpleMatch(pattern string, key string) (match bool, err error) { - if pattern[len(pattern)-1] == '*' { - return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil - } - return key == pattern, nil -} - -func prefixMatch(pattern string, key string) (match bool, err error) { - if pattern[len(pattern)-1] != '*' { - return false, nil - } - return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil -} - -func attachRootRole(u User) User { - inRoles := false - for _, r := range u.Roles { - if r == RootRoleName { - inRoles = true - break - } - } - if !inRoles { - u.Roles = append(u.Roles, RootRoleName) - } - return u -} - -func (s *store) getUser(name string, quorum bool) (User, error) { - resp, err := s.requestResource("/users/"+name, quorum) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeKeyNotFound { - return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name) - } - } - return User{}, err - } - var u User - err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u) - if err != nil { - return u, err - } - // Attach root role to root user. - if u.User == "root" { - u = attachRootRole(u) - } - return u, nil -} - -func (s *store) getRole(name string, quorum bool) (Role, error) { - if name == RootRoleName { - return rootRole, nil - } - resp, err := s.requestResource("/roles/"+name, quorum) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeKeyNotFound { - return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name) - } - } - return Role{}, err - } - var r Role - err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r) - return r, err -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth_requests.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth_requests.go deleted file mode 100644 index 6c8c50c8ccc23..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth_requests.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2auth - -import ( - "context" - "encoding/json" - "path" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - - "go.uber.org/zap" -) - -func (s *store) ensureAuthDirectories() error { - if s.ensuredOnce { - return nil - } - for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} { - ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - pe := false - rr := etcdserverpb.Request{ - Method: "PUT", - Path: res, - Dir: true, - PrevExist: &pe, - } - _, err := s.server.Do(ctx, rr) - cancel() - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeNodeExist { - continue - } - } - s.lg.Warn( - "failed to create auth directories", - zap.Error(err), - ) - return err - } - } - ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - defer cancel() - pe := false - rr := etcdserverpb.Request{ - Method: "PUT", - Path: StorePermsPrefix + "/enabled", - Val: "false", - PrevExist: &pe, - } - _, err := s.server.Do(ctx, rr) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeNodeExist { - s.ensuredOnce = true - return nil - } - } - return err - } - s.ensuredOnce = true - return nil -} - -func (s *store) enableAuth() error { - _, err := s.updateResource("/enabled", true) - return err -} -func (s *store) disableAuth() error { - _, err := s.updateResource("/enabled", false) - return err -} - -func (s *store) detectAuth() bool { - if s.server == nil { - return false - } - value, err := s.requestResource("/enabled", false) - if err != nil { - if e, ok := err.(*v2error.Error); ok { - if e.ErrorCode == v2error.EcodeKeyNotFound { - return false - } - } - s.lg.Warn( - "failed to detect auth settings", - zap.Error(err), - ) - return false - } - - var u bool - err = json.Unmarshal([]byte(*value.Event.Node.Value), &u) - if err != nil { - s.lg.Warn( - "internal bookkeeping value for enabled isn't valid JSON", - zap.Error(err), - ) - return false - } - return u -} - -func (s *store) requestResource(res string, quorum bool) (etcdserver.Response, error) { - ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - defer cancel() - p := path.Join(StorePermsPrefix, res) - method := "GET" - if quorum { - method = "QGET" - } - rr := etcdserverpb.Request{ - Method: method, - Path: p, - Dir: false, // TODO: always false? 
- } - return s.server.Do(ctx, rr) -} - -func (s *store) updateResource(res string, value interface{}) (etcdserver.Response, error) { - return s.setResource(res, value, true) -} -func (s *store) createResource(res string, value interface{}) (etcdserver.Response, error) { - return s.setResource(res, value, false) -} -func (s *store) setResource(res string, value interface{}, prevexist bool) (etcdserver.Response, error) { - err := s.ensureAuthDirectories() - if err != nil { - return etcdserver.Response{}, err - } - ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - defer cancel() - data, err := json.Marshal(value) - if err != nil { - return etcdserver.Response{}, err - } - p := path.Join(StorePermsPrefix, res) - rr := etcdserverpb.Request{ - Method: "PUT", - Path: p, - Val: string(data), - PrevExist: &prevexist, - } - return s.server.Do(ctx, rr) -} - -func (s *store) deleteResource(res string) error { - err := s.ensureAuthDirectories() - if err != nil { - return err - } - ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - defer cancel() - pex := true - p := path.Join(StorePermsPrefix, res) - _, err = s.server.Do(ctx, etcdserverpb.Request{ - Method: "DELETE", - Path: p, - PrevExist: &pex, - }) - return err -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery/discovery.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery/discovery.go index 9f1bc0adf50cf..610ae549441dd 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery/discovery.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery/discovery.go @@ -29,12 +29,12 @@ import ( "strings" "time" + "github.com/jonboulle/clockwork" + "go.uber.org/zap" + "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/client/v2" - - "github.com/jonboulle/clockwork" - "go.uber.org/zap" ) var ( @@ -110,7 +110,7 @@ func newProxyFunc(lg *zap.Logger, proxy string) (func(*http.Request) (*url.URL, } } if err != nil { - return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + return nil, fmt.Errorf("invalid proxy address %q: %w", proxy, err) } lg.Info("running proxy with discovery", zap.String("proxy-url", proxyURL.String())) @@ -187,7 +187,7 @@ func (d *discovery) joinCluster(config string) (string, error) { func (d *discovery) getCluster() (string, error) { nodes, size, index, err := d.checkCluster() if err != nil { - if err == ErrFullCluster { + if errors.Is(err, ErrFullCluster) { return nodesToCluster(nodes, size) } return "", err @@ -205,7 +205,8 @@ func (d *discovery) createSelf(contents string) error { resp, err := d.c.Create(ctx, d.selfKey(), contents) cancel() if err != nil { - if eerr, ok := err.(client.Error); ok && eerr.Code == client.ErrorCodeNodeExist { + var eerr client.Error + if errors.As(err, &eerr) && eerr.Code == client.ErrorCodeNodeExist { return ErrDuplicateID } return err @@ -224,13 +225,15 @@ func (d *discovery) checkCluster() ([]*client.Node, uint64, uint64, error) { resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil) cancel() if err != nil { - if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound { + var eerr *client.Error + if errors.As(err, &eerr) && eerr.Code == client.ErrorCodeKeyNotFound { return nil, 0, 0, ErrSizeNotFound } - if err == client.ErrInvalidJSON { + if errors.Is(err, client.ErrInvalidJSON) { return nil, 0, 0, ErrBadDiscoveryEndpoint } - if ce, ok := err.(*client.ClusterError); ok { + var ce *client.ClusterError + if errors.As(err, 
&ce) { d.lg.Warn( "failed to get from discovery server", zap.String("discovery-url", d.url.String()), @@ -251,7 +254,8 @@ func (d *discovery) checkCluster() ([]*client.Node, uint64, uint64, error) { resp, err = d.c.Get(ctx, d.cluster, nil) cancel() if err != nil { - if ce, ok := err.(*client.ClusterError); ok { + var ce *client.ClusterError + if errors.As(err, &ce) { d.lg.Warn( "failed to get from discovery server", zap.String("discovery-url", d.url.String()), @@ -357,7 +361,8 @@ func (d *discovery) waitNodes(nodes []*client.Node, size uint64, index uint64) ( ) resp, err := w.Next(context.Background()) if err != nil { - if ce, ok := err.(*client.ClusterError); ok { + var ce *client.ClusterError + if errors.As(err, &ce) { d.lg.Warn( "error while waiting for peers", zap.String("discovery-url", d.url.String()), diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go index 623863254a562..0cbf2bf6930e3 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go @@ -125,10 +125,6 @@ type Error struct { Index uint64 `json:"index"` } -func NewRequestError(errorCode int, cause string) *Error { - return NewError(errorCode, cause, 0) -} - func NewError(errorCode int, cause string, index uint64) *Error { return &Error{ ErrorCode: errorCode, @@ -143,7 +139,7 @@ func (e Error) Error() string { return e.Message + " (" + e.Cause + ")" } -func (e Error) toJsonString() string { +func (e Error) toJSONString() string { b, _ := json.Marshal(e) return string(b) } @@ -160,6 +156,6 @@ func (e Error) WriteTo(w http.ResponseWriter) error { w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) w.Header().Set("Content-Type", "application/json") w.WriteHeader(e.StatusCode()) - _, err := w.Write([]byte(e.toJsonString() + "\n")) + _, err := w.Write([]byte(e.toJSONString() + "\n")) return err } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/capability.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/capability.go deleted file mode 100644 index 718b5ed502a5b..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/capability.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2http - -import ( - "fmt" - "net/http" - - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" -) - -func authCapabilityHandler(fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if !api.IsCapabilityEnabled(api.AuthCapability) { - notCapable(w, r, api.AuthCapability) - return - } - fn(w, r) - } -} - -func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) { - herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c)) - if err := herr.WriteTo(w); err != nil { - // TODO: the following plog was removed, add the logging back if possible - // plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr) - } -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client.go deleted file mode 100644 index 78994595004c0..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client.go +++ /dev/null @@ -1,757 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2http - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "path" - "strconv" - "strings" - "time" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2auth" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - - "github.com/jonboulle/clockwork" - "go.uber.org/zap" -) - -const ( - authPrefix = "/v2/auth" - keysPrefix = "/v2/keys" - machinesPrefix = "/v2/machines" - membersPrefix = "/v2/members" - statsPrefix = "/v2/stats" -) - -// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests. 
-func NewClientHandler(lg *zap.Logger, server etcdserver.ServerPeer, timeout time.Duration) http.Handler { - if lg == nil { - lg = zap.NewNop() - } - mux := http.NewServeMux() - etcdhttp.HandleBasic(lg, mux, server) - etcdhttp.HandleMetrics(mux) - etcdhttp.HandleHealthForV2(lg, mux, server) - handleV2(lg, mux, server, timeout) - return requestLogger(lg, mux) -} - -func handleV2(lg *zap.Logger, mux *http.ServeMux, server etcdserver.ServerV2, timeout time.Duration) { - sec := v2auth.NewStore(lg, server, timeout) - kh := &keysHandler{ - lg: lg, - sec: sec, - server: server, - cluster: server.Cluster(), - timeout: timeout, - clientCertAuthEnabled: server.ClientCertAuthEnabled(), - } - - sh := &statsHandler{ - lg: lg, - stats: server, - } - - mh := &membersHandler{ - lg: lg, - sec: sec, - server: server, - cluster: server.Cluster(), - timeout: timeout, - clock: clockwork.NewRealClock(), - clientCertAuthEnabled: server.ClientCertAuthEnabled(), - } - - mah := &machinesHandler{cluster: server.Cluster()} - - sech := &authHandler{ - lg: lg, - sec: sec, - cluster: server.Cluster(), - clientCertAuthEnabled: server.ClientCertAuthEnabled(), - } - mux.HandleFunc("/", http.NotFound) - mux.Handle(keysPrefix, kh) - mux.Handle(keysPrefix+"/", kh) - mux.HandleFunc(statsPrefix+"/store", sh.serveStore) - mux.HandleFunc(statsPrefix+"/self", sh.serveSelf) - mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader) - mux.Handle(membersPrefix, mh) - mux.Handle(membersPrefix+"/", mh) - mux.Handle(machinesPrefix, mah) - handleAuth(mux, sech) -} - -type keysHandler struct { - lg *zap.Logger - sec v2auth.Store - server etcdserver.ServerV2 - cluster api.Cluster - timeout time.Duration - clientCertAuthEnabled bool -} - -func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") { - return - } - - w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) - - ctx, cancel := context.WithTimeout(context.Background(), h.timeout) - defer cancel() - clock := clockwork.NewRealClock() - startTime := clock.Now() - rr, noValueOnSuccess, err := parseKeyRequest(r, clock) - if err != nil { - writeKeyError(h.lg, w, err) - return - } - // The path must be valid at this point (we've parsed the request successfully). 
- if !hasKeyPrefixAccess(h.lg, h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) { - writeKeyNoAuth(w) - return - } - if !rr.Wait { - reportRequestReceived(rr) - } - resp, err := h.server.Do(ctx, rr) - if err != nil { - err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix) - writeKeyError(h.lg, w, err) - reportRequestFailed(rr, err) - return - } - switch { - case resp.Event != nil: - if err := writeKeyEvent(w, resp, noValueOnSuccess); err != nil { - // Should never be reached - h.lg.Warn("failed to write key event", zap.Error(err)) - } - reportRequestCompleted(rr, startTime) - case resp.Watcher != nil: - ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout) - defer cancel() - handleKeyWatch(ctx, h.lg, w, resp, rr.Stream) - default: - writeKeyError(h.lg, w, errors.New("received response with no Event/Watcher")) - } -} - -type machinesHandler struct { - cluster api.Cluster -} - -func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET", "HEAD") { - return - } - endpoints := h.cluster.ClientURLs() - w.Write([]byte(strings.Join(endpoints, ", "))) -} - -type membersHandler struct { - lg *zap.Logger - sec v2auth.Store - server etcdserver.ServerV2 - cluster api.Cluster - timeout time.Duration - clock clockwork.Clock - clientCertAuthEnabled bool -} - -func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") { - return - } - if !hasWriteRootAccess(h.lg, h.sec, r, h.clientCertAuthEnabled) { - writeNoAuth(h.lg, w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) - - ctx, cancel := context.WithTimeout(context.Background(), h.timeout) - defer cancel() - - switch r.Method { - case "GET": - switch trimPrefix(r.URL.Path, membersPrefix) { - case "": - mc := newMemberCollection(h.cluster.Members()) - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(mc); err != nil { - h.lg.Warn("failed to encode members response", zap.Error(err)) - } - case "leader": - id := h.server.Leader() - if id == 0 { - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election")) - return - } - m := newMember(h.cluster.Member(id)) - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(m); err != nil { - h.lg.Warn("failed to encode members response", zap.Error(err)) - } - default: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, "Not found")) - } - - case "POST": - req := httptypes.MemberCreateRequest{} - if ok := unmarshalRequest(h.lg, r, &req, w); !ok { - return - } - now := h.clock.Now() - m := membership.NewMember("", req.PeerURLs, "", &now) - _, err := h.server.AddMember(ctx, *m) - switch { - case err == membership.ErrIDExists || err == membership.ErrPeerURLexists: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) - return - case err != nil: - h.lg.Warn( - "failed to add a member", - zap.String("member-id", m.ID.String()), - zap.Error(err), - ) - writeError(h.lg, w, r, err) - return - } - res := newMember(m) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusCreated) - if err := json.NewEncoder(w).Encode(res); err != nil { - h.lg.Warn("failed to encode members response", zap.Error(err)) - } - - case "DELETE": - id, ok := getID(h.lg, r.URL.Path, w) - if !ok { - return - } - _, err := h.server.RemoveMember(ctx, uint64(id)) 
- switch { - case err == membership.ErrIDRemoved: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id))) - case err == membership.ErrIDNotFound: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id))) - case err != nil: - h.lg.Warn( - "failed to remove a member", - zap.String("member-id", id.String()), - zap.Error(err), - ) - writeError(h.lg, w, r, err) - default: - w.WriteHeader(http.StatusNoContent) - } - - case "PUT": - id, ok := getID(h.lg, r.URL.Path, w) - if !ok { - return - } - req := httptypes.MemberUpdateRequest{} - if ok := unmarshalRequest(h.lg, r, &req, w); !ok { - return - } - m := membership.Member{ - ID: id, - RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()}, - } - _, err := h.server.UpdateMember(ctx, m) - switch { - case err == membership.ErrPeerURLexists: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) - case err == membership.ErrIDNotFound: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id))) - case err != nil: - h.lg.Warn( - "failed to update a member", - zap.String("member-id", m.ID.String()), - zap.Error(err), - ) - writeError(h.lg, w, r, err) - default: - w.WriteHeader(http.StatusNoContent) - } - } -} - -type statsHandler struct { - lg *zap.Logger - stats stats.Stats -} - -func (h *statsHandler) serveStore(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(h.stats.StoreStats()) -} - -func (h *statsHandler) serveSelf(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(h.stats.SelfStats()) -} - -func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - stats := h.stats.LeaderStats() - if stats == nil { - etcdhttp.WriteError(h.lg, w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(stats) -} - -// parseKeyRequest converts a received http.Request on keysPrefix to -// a server Request, performing validation of supplied fields as appropriate. -// If any validation fails, an empty Request and non-nil error is returned. 
-func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) { - var noValueOnSuccess bool - emptyReq := etcdserverpb.Request{} - - err := r.ParseForm() - if err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidForm, - err.Error(), - ) - } - - if !strings.HasPrefix(r.URL.Path, keysPrefix) { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidForm, - "incorrect key prefix", - ) - } - p := path.Join(etcdserver.StoreKeysPrefix, r.URL.Path[len(keysPrefix):]) - - var pIdx, wIdx uint64 - if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeIndexNaN, - `invalid value for "prevIndex"`, - ) - } - if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeIndexNaN, - `invalid value for "waitIndex"`, - ) - } - - var rec, sort, wait, dir, quorum, stream bool - if rec, err = getBool(r.Form, "recursive"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "recursive"`, - ) - } - if sort, err = getBool(r.Form, "sorted"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "sorted"`, - ) - } - if wait, err = getBool(r.Form, "wait"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "wait"`, - ) - } - // TODO(jonboulle): define what parameters dir is/isn't compatible with? - if dir, err = getBool(r.Form, "dir"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "dir"`, - ) - } - if quorum, err = getBool(r.Form, "quorum"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "quorum"`, - ) - } - if stream, err = getBool(r.Form, "stream"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "stream"`, - ) - } - - if wait && r.Method != "GET" { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `"wait" can only be used with GET requests`, - ) - } - - pV := r.FormValue("prevValue") - if _, ok := r.Form["prevValue"]; ok && pV == "" { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodePrevValueRequired, - `"prevValue" cannot be empty`, - ) - } - - if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "noValueOnSuccess"`, - ) - } - - // TTL is nullable, so leave it null if not specified - // or an empty string - var ttl *uint64 - if len(r.FormValue("ttl")) > 0 { - i, err := getUint64(r.Form, "ttl") - if err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeTTLNaN, - `invalid value for "ttl"`, - ) - } - ttl = &i - } - - // prevExist is nullable, so leave it null if not specified - var pe *bool - if _, ok := r.Form["prevExist"]; ok { - bv, err := getBool(r.Form, "prevExist") - if err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - "invalid value for prevExist", - ) - } - pe = &bv - } - - // refresh is nullable, so leave it null if not specified - var refresh *bool - if _, ok := r.Form["refresh"]; ok { - bv, err := getBool(r.Form, "refresh") - if err != nil { - return emptyReq, false, 
v2error.NewRequestError( - v2error.EcodeInvalidField, - "invalid value for refresh", - ) - } - refresh = &bv - if refresh != nil && *refresh { - val := r.FormValue("value") - if _, ok := r.Form["value"]; ok && val != "" { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeRefreshValue, - `A value was provided on a refresh`, - ) - } - if ttl == nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeRefreshTTLRequired, - `No TTL value set`, - ) - } - } - } - - rr := etcdserverpb.Request{ - Method: r.Method, - Path: p, - Val: r.FormValue("value"), - Dir: dir, - PrevValue: pV, - PrevIndex: pIdx, - PrevExist: pe, - Wait: wait, - Since: wIdx, - Recursive: rec, - Sorted: sort, - Quorum: quorum, - Stream: stream, - } - - if pe != nil { - rr.PrevExist = pe - } - - if refresh != nil { - rr.Refresh = refresh - } - - // Null TTL is equivalent to unset Expiration - if ttl != nil { - expr := time.Duration(*ttl) * time.Second - rr.Expiration = clock.Now().Add(expr).UnixNano() - } - - return rr, noValueOnSuccess, nil -} - -// writeKeyEvent trims the prefix of key path in a single Event under -// StoreKeysPrefix, serializes it and writes the resulting JSON to the given -// ResponseWriter, along with the appropriate headers. -func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error { - ev := resp.Event - if ev == nil { - return errors.New("cannot write empty Event") - } - w.Header().Set("Content-Type", "application/json") - w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex)) - w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index)) - w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term)) - - if ev.IsCreated() { - w.WriteHeader(http.StatusCreated) - } - - ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix) - if noValueOnSuccess && - (ev.Action == v2store.Set || ev.Action == v2store.CompareAndSwap || - ev.Action == v2store.Create || ev.Action == v2store.Update) { - ev.Node = nil - ev.PrevNode = nil - } - return json.NewEncoder(w).Encode(ev) -} - -func writeKeyNoAuth(w http.ResponseWriter) { - e := v2error.NewError(v2error.EcodeUnauthorized, "Insufficient credentials", 0) - e.WriteTo(w) -} - -// writeKeyError logs and writes the given Error to the ResponseWriter. -// If Error is not an etcdErr, the error will be converted to an etcd error. 
-func writeKeyError(lg *zap.Logger, w http.ResponseWriter, err error) { - if err == nil { - return - } - switch e := err.(type) { - case *v2error.Error: - e.WriteTo(w) - default: - switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost: - if lg != nil { - lg.Warn( - "v2 response error", - zap.String("internal-server-error", err.Error()), - ) - } - default: - if lg != nil { - lg.Warn( - "unexpected v2 response error", - zap.String("internal-server-error", err.Error()), - ) - } - } - ee := v2error.NewError(v2error.EcodeRaftInternal, err.Error(), 0) - ee.WriteTo(w) - } -} - -func handleKeyWatch(ctx context.Context, lg *zap.Logger, w http.ResponseWriter, resp etcdserver.Response, stream bool) { - wa := resp.Watcher - defer wa.Remove() - ech := wa.EventChan() - var nch <-chan bool - if x, ok := w.(http.CloseNotifier); ok { - nch = x.CloseNotify() - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex())) - w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index)) - w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term)) - w.WriteHeader(http.StatusOK) - - // Ensure headers are flushed early, in case of long polling - w.(http.Flusher).Flush() - - for { - select { - case <-nch: - // Client closed connection. Nothing to do. - return - case <-ctx.Done(): - // Timed out. net/http will close the connection for us, so nothing to do. - return - case ev, ok := <-ech: - if !ok { - // If the channel is closed this may be an indication of - // that notifications are much more than we are able to - // send to the client in time. Then we simply end streaming. - return - } - ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix) - if err := json.NewEncoder(w).Encode(ev); err != nil { - // Should never be reached - lg.Warn("failed to encode event", zap.Error(err)) - return - } - if !stream { - return - } - w.(http.Flusher).Flush() - } - } -} - -func trimEventPrefix(ev *v2store.Event, prefix string) *v2store.Event { - if ev == nil { - return nil - } - // Since the *Event may reference one in the store history - // history, we must copy it before modifying - e := ev.Clone() - trimNodeExternPrefix(e.Node, prefix) - trimNodeExternPrefix(e.PrevNode, prefix) - return e -} - -func trimNodeExternPrefix(n *v2store.NodeExtern, prefix string) { - if n == nil { - return - } - n.Key = strings.TrimPrefix(n.Key, prefix) - for _, nn := range n.Nodes { - trimNodeExternPrefix(nn, prefix) - } -} - -func trimErrorPrefix(err error, prefix string) error { - if e, ok := err.(*v2error.Error); ok { - e.Cause = strings.TrimPrefix(e.Cause, prefix) - } - return err -} - -func unmarshalRequest(lg *zap.Logger, r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool { - ctype := r.Header.Get("Content-Type") - semicolonPosition := strings.Index(ctype, ";") - if semicolonPosition != -1 { - ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition])) - } - if ctype != "application/json" { - writeError(lg, w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype))) - return false - } - b, err := ioutil.ReadAll(r.Body) - if err != nil { - writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) - return false - } - if err := req.UnmarshalJSON(b); err != nil { - writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) - return false - } - return true -} - -func getID(lg *zap.Logger, p string, w 
http.ResponseWriter) (types.ID, bool) { - idStr := trimPrefix(p, membersPrefix) - if idStr == "" { - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return 0, false - } - id, err := types.IDFromString(idStr) - if err != nil { - writeError(lg, w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr))) - return 0, false - } - return id, true -} - -// getUint64 extracts a uint64 by the given key from a Form. If the key does -// not exist in the form, 0 is returned. If the key exists but the value is -// badly formed, an error is returned. If multiple values are present only the -// first is considered. -func getUint64(form url.Values, key string) (i uint64, err error) { - if vals, ok := form[key]; ok { - i, err = strconv.ParseUint(vals[0], 10, 64) - } - return -} - -// getBool extracts a bool by the given key from a Form. If the key does not -// exist in the form, false is returned. If the key exists but the value is -// badly formed, an error is returned. If multiple values are present only the -// first is considered. -func getBool(form url.Values, key string) (b bool, err error) { - if vals, ok := form[key]; ok { - b, err = strconv.ParseBool(vals[0]) - } - return -} - -// trimPrefix removes a given prefix and any slash following the prefix -// e.g.: trimPrefix("foo", "foo") == trimPrefix("foo/", "foo") == "" -func trimPrefix(p, prefix string) (s string) { - s = strings.TrimPrefix(p, prefix) - s = strings.TrimPrefix(s, "/") - return -} - -func newMemberCollection(ms []*membership.Member) *httptypes.MemberCollection { - c := httptypes.MemberCollection(make([]httptypes.Member, len(ms))) - - for i, m := range ms { - c[i] = newMember(m) - } - - return &c -} - -func newMember(m *membership.Member) httptypes.Member { - tm := httptypes.Member{ - ID: m.ID.String(), - Name: m.Name, - PeerURLs: make([]string, len(m.PeerURLs)), - ClientURLs: make([]string, len(m.ClientURLs)), - } - - copy(tm.PeerURLs, m.PeerURLs) - copy(tm.ClientURLs, m.ClientURLs) - - return tm -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client_auth.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client_auth.go deleted file mode 100644 index 2c6e7744ed7b4..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client_auth.go +++ /dev/null @@ -1,604 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2http - -import ( - "encoding/json" - "net/http" - "path" - "strings" - - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2auth" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" - - "go.uber.org/zap" -) - -type authHandler struct { - lg *zap.Logger - sec v2auth.Store - cluster api.Cluster - clientCertAuthEnabled bool -} - -func hasWriteRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { - if r.Method == "GET" || r.Method == "HEAD" { - return true - } - return hasRootAccess(lg, sec, r, clientCertAuthEnabled) -} - -func userFromBasicAuth(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User { - username, password, ok := r.BasicAuth() - if !ok { - lg.Warn("malformed basic auth encoding") - return nil - } - user, err := sec.GetUser(username) - if err != nil { - return nil - } - - ok = sec.CheckPassword(user, password) - if !ok { - lg.Warn("incorrect password", zap.String("user-name", username)) - return nil - } - return &user -} - -func userFromClientCertificate(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User { - if r.TLS == nil { - return nil - } - - for _, chains := range r.TLS.VerifiedChains { - for _, chain := range chains { - lg.Debug("found common name", zap.String("common-name", chain.Subject.CommonName)) - user, err := sec.GetUser(chain.Subject.CommonName) - if err == nil { - lg.Debug( - "authenticated a user via common name", - zap.String("user-name", user.User), - zap.String("common-name", chain.Subject.CommonName), - ) - return &user - } - } - } - return nil -} - -func hasRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { - if sec == nil { - // No store means no auth available, eg, tests. - return true - } - if !sec.AuthEnabled() { - return true - } - - var rootUser *v2auth.User - if r.Header.Get("Authorization") == "" && clientCertAuthEnabled { - rootUser = userFromClientCertificate(lg, sec, r) - if rootUser == nil { - return false - } - } else { - rootUser = userFromBasicAuth(lg, sec, r) - if rootUser == nil { - return false - } - } - - for _, role := range rootUser.Roles { - if role == v2auth.RootRoleName { - return true - } - } - - lg.Warn( - "a user does not have root role for resource", - zap.String("root-user", rootUser.User), - zap.String("root-role-name", v2auth.RootRoleName), - zap.String("resource-path", r.URL.Path), - ) - return false -} - -func hasKeyPrefixAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool { - if sec == nil { - // No store means no auth available, eg, tests. 
- return true - } - if !sec.AuthEnabled() { - return true - } - - var user *v2auth.User - if r.Header.Get("Authorization") == "" { - if clientCertAuthEnabled { - user = userFromClientCertificate(lg, sec, r) - } - if user == nil { - return hasGuestAccess(lg, sec, r, key) - } - } else { - user = userFromBasicAuth(lg, sec, r) - if user == nil { - return false - } - } - - writeAccess := r.Method != "GET" && r.Method != "HEAD" - for _, roleName := range user.Roles { - role, err := sec.GetRole(roleName) - if err != nil { - continue - } - if recursive { - if role.HasRecursiveAccess(key, writeAccess) { - return true - } - } else if role.HasKeyAccess(key, writeAccess) { - return true - } - } - - lg.Warn( - "invalid access for user on key", - zap.String("user-name", user.User), - zap.String("key", key), - ) - return false -} - -func hasGuestAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string) bool { - writeAccess := r.Method != "GET" && r.Method != "HEAD" - role, err := sec.GetRole(v2auth.GuestRoleName) - if err != nil { - return false - } - if role.HasKeyAccess(key, writeAccess) { - return true - } - - lg.Warn( - "invalid access for a guest role on key", - zap.String("role-name", v2auth.GuestRoleName), - zap.String("key", key), - ) - return false -} - -func writeNoAuth(lg *zap.Logger, w http.ResponseWriter, r *http.Request) { - herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials") - if err := herr.WriteTo(w); err != nil { - lg.Debug( - "failed to write v2 HTTP error", - zap.String("remote-addr", r.RemoteAddr), - zap.Error(err), - ) - } -} - -func handleAuth(mux *http.ServeMux, sh *authHandler) { - mux.HandleFunc(authPrefix+"/roles", authCapabilityHandler(sh.baseRoles)) - mux.HandleFunc(authPrefix+"/roles/", authCapabilityHandler(sh.handleRoles)) - mux.HandleFunc(authPrefix+"/users", authCapabilityHandler(sh.baseUsers)) - mux.HandleFunc(authPrefix+"/users/", authCapabilityHandler(sh.handleUsers)) - mux.HandleFunc(authPrefix+"/enable", authCapabilityHandler(sh.enableDisable)) -} - -func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(sh.lg, w, r) - return - } - - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - roles, err := sh.sec.AllRoles() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - if roles == nil { - roles = make([]string, 0) - } - - err = r.ParseForm() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - var rolesCollections struct { - Roles []v2auth.Role `json:"roles"` - } - for _, roleName := range roles { - var role v2auth.Role - role, err = sh.sec.GetRole(roleName) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - rolesCollections.Roles = append(rolesCollections.Roles, role) - } - err = json.NewEncoder(w).Encode(rolesCollections) - - if err != nil { - sh.lg.Warn( - "failed to encode base roles", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - writeError(sh.lg, w, r, err) - return - } -} - -func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) { - subpath := path.Clean(r.URL.Path[len(authPrefix):]) - // Split "/roles/rolename/command". 
- // First item is an empty string, second is "roles" - pieces := strings.Split(subpath, "/") - if len(pieces) == 2 { - sh.baseRoles(w, r) - return - } - if len(pieces) != 3 { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) - return - } - sh.forRole(w, r, pieces[2]) -} - -func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role string) { - if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { - return - } - if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(sh.lg, w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case "GET": - data, err := sh.sec.GetRole(role) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - err = json.NewEncoder(w).Encode(data) - if err != nil { - sh.lg.Warn( - "failed to encode a role", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - return - } - return - - case "PUT": - var in v2auth.Role - err := json.NewDecoder(r.Body).Decode(&in) - if err != nil { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) - return - } - if in.Role != role { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL")) - return - } - - var out v2auth.Role - - // create - if in.Grant.IsEmpty() && in.Revoke.IsEmpty() { - err = sh.sec.CreateRole(in) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - w.WriteHeader(http.StatusCreated) - out = in - } else { - if !in.Permissions.IsEmpty() { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke")) - return - } - out, err = sh.sec.UpdateRole(in) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - w.WriteHeader(http.StatusOK) - } - - err = json.NewEncoder(w).Encode(out) - if err != nil { - sh.lg.Warn( - "failed to encode a role", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - return - } - return - - case "DELETE": - err := sh.sec.DeleteRole(role) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - } -} - -type userWithRoles struct { - User string `json:"user"` - Roles []v2auth.Role `json:"roles,omitempty"` -} - -type usersCollections struct { - Users []userWithRoles `json:"users"` -} - -func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(sh.lg, w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - users, err := sh.sec.AllUsers() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - if users == nil { - users = make([]string, 0) - } - - err = r.ParseForm() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - ucs := usersCollections{} - for _, userName := range users { - var user v2auth.User - user, err = sh.sec.GetUser(userName) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - uwr := userWithRoles{User: user.User} - for _, roleName := range user.Roles { - var role v2auth.Role - role, err = sh.sec.GetRole(roleName) - if err != nil { - continue - } - uwr.Roles = append(uwr.Roles, role) - } - - ucs.Users = append(ucs.Users, uwr) - } - err = json.NewEncoder(w).Encode(ucs) - - if err != nil { - sh.lg.Warn( - "failed to 
encode users", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - writeError(sh.lg, w, r, err) - return - } -} - -func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) { - subpath := path.Clean(r.URL.Path[len(authPrefix):]) - // Split "/users/username". - // First item is an empty string, second is "users" - pieces := strings.Split(subpath, "/") - if len(pieces) == 2 { - sh.baseUsers(w, r) - return - } - if len(pieces) != 3 { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) - return - } - sh.forUser(w, r, pieces[2]) -} - -func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user string) { - if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { - return - } - if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(sh.lg, w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case "GET": - u, err := sh.sec.GetUser(user) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - err = r.ParseForm() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - uwr := userWithRoles{User: u.User} - for _, roleName := range u.Roles { - var role v2auth.Role - role, err = sh.sec.GetRole(roleName) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - uwr.Roles = append(uwr.Roles, role) - } - err = json.NewEncoder(w).Encode(uwr) - - if err != nil { - sh.lg.Warn( - "failed to encode roles", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - return - } - return - - case "PUT": - var u v2auth.User - err := json.NewDecoder(r.Body).Decode(&u) - if err != nil { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) - return - } - if u.User != user { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL")) - return - } - - var ( - out v2auth.User - created bool - ) - - if len(u.Grant) == 0 && len(u.Revoke) == 0 { - // create or update - if len(u.Roles) != 0 { - out, err = sh.sec.CreateUser(u) - } else { - // if user passes in both password and roles, we are unsure about his/her - // intention. 
- out, created, err = sh.sec.CreateOrUpdateUser(u) - } - - if err != nil { - writeError(sh.lg, w, r, err) - return - } - } else { - // update case - if len(u.Roles) != 0 { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke")) - return - } - out, err = sh.sec.UpdateUser(u) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - } - - if created { - w.WriteHeader(http.StatusCreated) - } else { - w.WriteHeader(http.StatusOK) - } - - out.Password = "" - - err = json.NewEncoder(w).Encode(out) - if err != nil { - sh.lg.Warn( - "failed to encode a user", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - return - } - return - - case "DELETE": - err := sh.sec.DeleteUser(user) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - } -} - -type enabled struct { - Enabled bool `json:"enabled"` -} - -func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { - return - } - if !hasWriteRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(sh.lg, w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - isEnabled := sh.sec.AuthEnabled() - switch r.Method { - case "GET": - jsonDict := enabled{isEnabled} - err := json.NewEncoder(w).Encode(jsonDict) - if err != nil { - sh.lg.Warn( - "failed to encode a auth state", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - } - - case "PUT": - err := sh.sec.EnableAuth() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - case "DELETE": - err := sh.sec.DisableAuth() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - } -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/http.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/http.go deleted file mode 100644 index 88138b80a8a9d..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/http.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2http - -import ( - "math" - "net/http" - "strings" - "time" - - "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2auth" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" - - "go.uber.org/zap" -) - -const ( - // time to wait for a Watch request - defaultWatchTimeout = time.Duration(math.MaxInt64) -) - -func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) { - if err == nil { - return - } - if e, ok := err.(v2auth.Error); ok { - herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error()) - if et := herr.WriteTo(w); et != nil { - if lg != nil { - lg.Debug( - "failed to write v2 HTTP error", - zap.String("remote-addr", r.RemoteAddr), - zap.String("v2auth-error", e.Error()), - zap.Error(et), - ) - } - } - return - } - etcdhttp.WriteError(lg, w, r, err) -} - -// allowMethod verifies that the given method is one of the allowed methods, -// and if not, it writes an error to w. A boolean is returned indicating -// whether or not the method is allowed. -func allowMethod(w http.ResponseWriter, m string, ms ...string) bool { - for _, meth := range ms { - if m == meth { - return true - } - } - w.Header().Set("Allow", strings.Join(ms, ",")) - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return false -} - -func requestLogger(lg *zap.Logger, handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if lg != nil { - lg.Debug( - "handling HTTP request", - zap.String("method", r.Method), - zap.String("request-uri", r.RequestURI), - zap.String("remote-addr", r.RemoteAddr), - ) - } - handler.ServeHTTP(w, r) - }) -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes/member.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes/member.go deleted file mode 100644 index a5467be91ee20..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes/member.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package httptypes defines how etcd's HTTP API entities are serialized to and -// deserialized from JSON. 
-package httptypes - -import ( - "encoding/json" - - "go.etcd.io/etcd/client/pkg/v3/types" -) - -type Member struct { - ID string `json:"id"` - Name string `json:"name"` - PeerURLs []string `json:"peerURLs"` - ClientURLs []string `json:"clientURLs"` -} - -type MemberCreateRequest struct { - PeerURLs types.URLs -} - -type MemberUpdateRequest struct { - MemberCreateRequest -} - -func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error { - s := struct { - PeerURLs []string `json:"peerURLs"` - }{} - - err := json.Unmarshal(data, &s) - if err != nil { - return err - } - - urls, err := types.NewURLs(s.PeerURLs) - if err != nil { - return err - } - - m.PeerURLs = urls - return nil -} - -type MemberCollection []Member - -func (c *MemberCollection) MarshalJSON() ([]byte, error) { - d := struct { - Members []Member `json:"members"` - }{ - Members: []Member(*c), - } - - return json.Marshal(d) -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/metrics.go deleted file mode 100644 index bdbd8c71c1ba0..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/metrics.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2http - -import ( - "net/http" - "strconv" - "time" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - incomingEvents = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "http", - Name: "received_total", - Help: "Counter of requests received into the system (successfully parsed and authd).", - }, []string{"method"}) - - failedEvents = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "http", - Name: "failed_total", - Help: "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) 
and code (400, 500 etc.).", - }, []string{"method", "code"}) - - successfulEventsHandlingSec = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "http", - Name: "successful_duration_seconds", - Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).", - - // lowest bucket start of upper bound 0.0005 sec (0.5 ms) with factor 2 - // highest bucket start of 0.0005 sec * 2^12 == 2.048 sec - Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13), - }, []string{"method"}) -) - -func init() { - prometheus.MustRegister(incomingEvents) - prometheus.MustRegister(failedEvents) - prometheus.MustRegister(successfulEventsHandlingSec) -} - -func reportRequestReceived(request etcdserverpb.Request) { - incomingEvents.WithLabelValues(methodFromRequest(request)).Inc() -} - -func reportRequestCompleted(request etcdserverpb.Request, startTime time.Time) { - method := methodFromRequest(request) - successfulEventsHandlingSec.WithLabelValues(method).Observe(time.Since(startTime).Seconds()) -} - -func reportRequestFailed(request etcdserverpb.Request, err error) { - method := methodFromRequest(request) - failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc() -} - -func methodFromRequest(request etcdserverpb.Request) string { - if request.Method == "GET" && request.Quorum { - return "QGET" - } - return request.Method -} - -func codeFromError(err error) int { - if err == nil { - return http.StatusInternalServerError - } - switch e := err.(type) { - case *v2error.Error: - return e.StatusCode() - case *httptypes.HTTPError: - return e.Code - default: - return http.StatusInternalServerError - } -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/queue.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/queue.go index 2c3dff3d0ffc5..9db8a14bcaed2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/queue.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/queue.go @@ -74,7 +74,6 @@ func (q *statsQueue) Insert(p *RequestStats) { q.items[q.back] = p q.totalReqSize += q.items[q.back].Size - } // Rate function returns the package rate and byte rate diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/server.go index 45effb1edc555..e8d218a72095e 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/server.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/server.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "go.etcd.io/etcd/raft/v3" + "go.etcd.io/raft/v3" ) // ServerStats encapsulates various statistics about an EtcdServer and its @@ -59,7 +59,7 @@ type serverStats struct { StartTime time.Time `json:"startTime"` } `json:"leaderInfo"` - RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"` + RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt"` RecvingPkgRate float64 `json:"recvPkgRate,omitempty"` RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"` diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event_history.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event_history.go index c9bcdca051320..11c8b0176f7fb 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event_history.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event_history.go @@ -125,5 +125,4 @@ func (eh *EventHistory) clone() *EventHistory { Queue: clonedQueue, LastIndex: 
eh.LastIndex, } - } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node.go index 9fe6263e2e8bf..7e5c3e8deb51a 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node.go @@ -19,9 +19,9 @@ import ( "sort" "time" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "github.com/jonboulle/clockwork" + + "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" ) // explanations of Compare function result @@ -277,7 +277,6 @@ func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern { i := 0 for _, child := range children { - if child.IsHidden() { // get will not list hidden node continue } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node_extern.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node_extern.go index b3bf5f3c97680..ff2e13e6391e5 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node_extern.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node_extern.go @@ -62,7 +62,6 @@ func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock if sorted { sort.Sort(eNode.Nodes) } - } else { // node is a file value, _ := n.Read() eNode.Value = &value diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/stats.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/stats.go index 9151799da7b6e..55ede56d10c9d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/stats.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/stats.go @@ -104,7 +104,7 @@ func (s *Stats) clone() *Stats { } } -func (s *Stats) toJson() []byte { +func (s *Stats) toJSON() []byte { b, _ := json.Marshal(s) return b } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/store.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/store.go index 32cb26ad964a1..117f1c03e14ac 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/store.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/store.go @@ -23,10 +23,10 @@ import ( "sync" "time" + "github.com/jonboulle/clockwork" + "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - - "github.com/jonboulle/clockwork" ) // The default version to set when the store is first initialized. @@ -99,7 +99,7 @@ func newStore(namespaces ...string) *store { } s.Stats = newStats() s.WatcherHub = newWatchHub(1000) - s.ttlKeyHeap = newTtlKeyHeap() + s.ttlKeyHeap = newTTLKeyHeap() s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...) 
return s } @@ -258,7 +258,6 @@ func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, value string, expireOpts TTLOptionSet) (*Event, error) { - var err *v2error.Error s.worldLock.Lock() @@ -535,7 +534,7 @@ func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet eNode := e.Node if err := n.Write(newValue, nextIndex); err != nil { - return nil, fmt.Errorf("nodePath %v : %v", nodePath, err) + return nil, fmt.Errorf("nodePath %v : %w", nodePath, err) } if n.IsDir() { @@ -565,7 +564,6 @@ func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool, expireTime time.Time, action string) (*Event, *v2error.Error) { - currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1 if unique { // append unique item under the node path @@ -624,7 +622,6 @@ func (s *store) internalCreate(nodePath string, dir bool, value string, unique, eNode.Value = &valueCopy n = newKV(s, nodePath, value, nextIndex, d, expireTime) - } else { // create directory eNode.Dir = true @@ -653,7 +650,6 @@ func (s *store) internalGet(nodePath string) (*node, *v2error.Error) { nodePath = path.Clean(path.Join("/", nodePath)) walkFunc := func(parent *node, name string) (*node, *v2error.Error) { - if !parent.IsDir() { err := v2error.NewError(v2error.EcodeNotDir, parent.Path, s.CurrentIndex) return nil, err @@ -707,7 +703,6 @@ func (s *store) DeleteExpiredKeys(cutoff time.Time) { s.WatcherHub.notify(e) } - } // checkDir will check whether the component is a directory under parent node. @@ -781,15 +776,17 @@ func (s *store) Recovery(state []byte) error { return err } - s.ttlKeyHeap = newTtlKeyHeap() + s.ttlKeyHeap = newTTLKeyHeap() s.Root.recoverAndclean() return nil } +//revive:disable:var-naming func (s *store) JsonStats() []byte { + //revive:enable:var-naming s.Stats.Watchers = uint64(s.WatcherHub.count) - return s.Stats.toJson() + return s.Stats.toJSON() } func (s *store) HasTTLKeys() bool { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/ttl_key_heap.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/ttl_key_heap.go index 477d2b9f3aa1c..77ca8e9ed608f 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/ttl_key_heap.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/ttl_key_heap.go @@ -22,7 +22,7 @@ type ttlKeyHeap struct { keyMap map[*node]int } -func newTtlKeyHeap() *ttlKeyHeap { +func newTTLKeyHeap() *ttlKeyHeap { h := &ttlKeyHeap{keyMap: make(map[*node]int)} heap.Init(h) return h @@ -45,13 +45,13 @@ func (h ttlKeyHeap) Swap(i, j int) { h.keyMap[h.array[j]] = j } -func (h *ttlKeyHeap) Push(x interface{}) { +func (h *ttlKeyHeap) Push(x any) { n, _ := x.(*node) h.keyMap[n] = len(h.array) h.array = append(h.array, n) } -func (h *ttlKeyHeap) Pop() interface{} { +func (h *ttlKeyHeap) Pop() any { old := h.array n := len(old) x := old[n-1] @@ -77,7 +77,7 @@ func (h *ttlKeyHeap) pop() *node { return n } -func (h *ttlKeyHeap) push(x interface{}) { +func (h *ttlKeyHeap) push(x any) { heap.Push(h, x) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/cluster.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/cluster.go deleted file mode 100644 index d275e05718391..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/cluster.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2017 The etcd Authors -// 
-// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2v3 - -import ( - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - - "github.com/coreos/go-semver/semver" -) - -func (s *v2v3Server) ID() types.ID { - // TODO: use an actual member ID - return types.ID(0xe7cd2f00d) -} -func (s *v2v3Server) ClientURLs() []string { panic("STUB") } -func (s *v2v3Server) Members() []*membership.Member { panic("STUB") } -func (s *v2v3Server) Member(id types.ID) *membership.Member { panic("STUB") } -func (s *v2v3Server) Version() *semver.Version { panic("STUB") } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/server.go deleted file mode 100644 index 71557ceb5c842..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/server.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2v3 - -import ( - "context" - "net/http" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" -) - -type fakeStats struct{} - -func (s *fakeStats) SelfStats() []byte { return nil } -func (s *fakeStats) LeaderStats() []byte { return nil } -func (s *fakeStats) StoreStats() []byte { return nil } - -type v2v3Server struct { - lg *zap.Logger - c *clientv3.Client - store *v2v3Store - fakeStats -} - -func NewServer(lg *zap.Logger, c *clientv3.Client, pfx string) etcdserver.ServerPeer { - return &v2v3Server{lg: lg, c: c, store: newStore(c, pfx)} -} - -func (s *v2v3Server) ClientCertAuthEnabled() bool { return false } - -func (s *v2v3Server) LeaseHandler() http.Handler { panic("STUB: lease handler") } -func (s *v2v3Server) RaftHandler() http.Handler { panic("STUB: raft handler") } - -func (s *v2v3Server) Leader() types.ID { - ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) - defer cancel() - resp, err := s.c.Status(ctx, s.c.Endpoints()[0]) - if err != nil { - return 0 - } - return types.ID(resp.Leader) -} - -func (s *v2v3Server) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { - // adding member as learner is not supported by V2 Server. 
- resp, err := s.c.MemberAdd(ctx, memb.PeerURLs) - if err != nil { - return nil, err - } - return v3MembersToMembership(resp.Members), nil -} - -func (s *v2v3Server) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - resp, err := s.c.MemberRemove(ctx, id) - if err != nil { - return nil, err - } - return v3MembersToMembership(resp.Members), nil -} - -func (s *v2v3Server) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - resp, err := s.c.MemberPromote(ctx, id) - if err != nil { - return nil, err - } - return v3MembersToMembership(resp.Members), nil -} - -func (s *v2v3Server) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) { - resp, err := s.c.MemberUpdate(ctx, uint64(m.ID), m.PeerURLs) - if err != nil { - return nil, err - } - return v3MembersToMembership(resp.Members), nil -} - -func v3MembersToMembership(v3membs []*pb.Member) []*membership.Member { - membs := make([]*membership.Member, len(v3membs)) - for i, m := range v3membs { - membs[i] = &membership.Member{ - ID: types.ID(m.ID), - RaftAttributes: membership.RaftAttributes{ - PeerURLs: m.PeerURLs, - IsLearner: m.IsLearner, - }, - Attributes: membership.Attributes{ - Name: m.Name, - ClientURLs: m.ClientURLs, - }, - } - } - return membs -} - -func (s *v2v3Server) ClusterVersion() *semver.Version { return s.Version() } -func (s *v2v3Server) Cluster() api.Cluster { return s } -func (s *v2v3Server) Alarms() []*pb.AlarmMember { return nil } -func (s *v2v3Server) LeaderChangedNotify() <-chan struct{} { return nil } - -func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) { - applier := etcdserver.NewApplierV2(s.lg, s.store, nil) - reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier) - req := (*etcdserver.RequestV2)(&r) - resp, err := req.Handle(ctx, reqHandler) - if resp.Err != nil { - return resp, resp.Err - } - return resp, err -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/store.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/store.go deleted file mode 100644 index 6d78cab71989c..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/store.go +++ /dev/null @@ -1,638 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2v3 - -import ( - "context" - "fmt" - "path" - "sort" - "strings" - "time" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" -) - -// store implements the Store interface for V2 using -// a v3 client. -type v2v3Store struct { - c *clientv3.Client - // pfx is the v3 prefix where keys should be stored. 
- pfx string - ctx context.Context -} - -const maxPathDepth = 63 - -var errUnsupported = fmt.Errorf("TTLs are unsupported") - -func NewStore(c *clientv3.Client, pfx string) v2store.Store { return newStore(c, pfx) } - -func newStore(c *clientv3.Client, pfx string) *v2v3Store { return &v2v3Store{c, pfx, c.Ctx()} } - -func (s *v2v3Store) Index() uint64 { panic("STUB") } - -func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*v2store.Event, error) { - key := s.mkPath(nodePath) - resp, err := s.c.Txn(s.ctx).Then( - clientv3.OpGet(key+"/"), - clientv3.OpGet(key), - ).Commit() - if err != nil { - return nil, err - } - - if kvs := resp.Responses[0].GetResponseRange().Kvs; len(kvs) != 0 || isRoot(nodePath) { - nodes, err := s.getDir(nodePath, recursive, sorted, resp.Header.Revision) - if err != nil { - return nil, err - } - cidx, midx := uint64(0), uint64(0) - if len(kvs) > 0 { - cidx, midx = mkV2Rev(kvs[0].CreateRevision), mkV2Rev(kvs[0].ModRevision) - } - return &v2store.Event{ - Action: v2store.Get, - Node: &v2store.NodeExtern{ - Key: nodePath, - Dir: true, - Nodes: nodes, - CreatedIndex: cidx, - ModifiedIndex: midx, - }, - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil - } - - kvs := resp.Responses[1].GetResponseRange().Kvs - if len(kvs) == 0 { - return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) - } - - return &v2store.Event{ - Action: v2store.Get, - Node: s.mkV2Node(kvs[0]), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*v2store.NodeExtern, error) { - rootNodes, err := s.getDirDepth(nodePath, 1, rev) - if err != nil || !recursive { - if sorted { - sort.Sort(v2store.NodeExterns(rootNodes)) - } - return rootNodes, err - } - nextNodes := rootNodes - nodes := make(map[string]*v2store.NodeExtern) - // Breadth walk the subdirectories - for i := 2; len(nextNodes) > 0; i++ { - for _, n := range nextNodes { - nodes[n.Key] = n - if parent := nodes[path.Dir(n.Key)]; parent != nil { - parent.Nodes = append(parent.Nodes, n) - } - } - if nextNodes, err = s.getDirDepth(nodePath, i, rev); err != nil { - return nil, err - } - } - - if sorted { - sort.Sort(v2store.NodeExterns(rootNodes)) - } - return rootNodes, nil -} - -func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*v2store.NodeExtern, error) { - pd := s.mkPathDepth(nodePath, depth) - resp, err := s.c.Get(s.ctx, pd, clientv3.WithPrefix(), clientv3.WithRev(rev)) - if err != nil { - return nil, err - } - - nodes := make([]*v2store.NodeExtern, len(resp.Kvs)) - for i, kv := range resp.Kvs { - nodes[i] = s.mkV2Node(kv) - } - return nodes, nil -} - -func (s *v2v3Store) Set( - nodePath string, - dir bool, - value string, - expireOpts v2store.TTLOptionSet, -) (*v2store.Event, error) { - if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { - return nil, errUnsupported - } - - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - - ecode := 0 - applyf := func(stm concurrency.STM) error { - // build path if any directories in path do not exist - dirs := []string{} - for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) { - pp := s.mkPath(p) - if stm.Rev(pp) > 0 { - ecode = v2error.EcodeNotDir - return nil - } - if stm.Rev(pp+"/") == 0 { - dirs = append(dirs, pp+"/") - } - } - for _, d := range dirs { - stm.Put(d, "") - } - - key := s.mkPath(nodePath) - if dir { - if stm.Rev(key) != 0 { - // exists as non-dir - ecode = v2error.EcodeNotDir 
- return nil - } - key = key + "/" - } else if stm.Rev(key+"/") != 0 { - ecode = v2error.EcodeNotFile - return nil - } - stm.Put(key, value, clientv3.WithPrevKV()) - stm.Put(s.mkActionKey(), v2store.Set) - return nil - } - - resp, err := s.newSTM(applyf) - if err != nil { - return nil, err - } - if ecode != 0 { - return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision)) - } - - createRev := resp.Header.Revision - var pn *v2store.NodeExtern - if pkv := prevKeyFromPuts(resp); pkv != nil { - pn = s.mkV2Node(pkv) - createRev = pkv.CreateRevision - } - - vp := &value - if dir { - vp = nil - } - return &v2store.Event{ - Action: v2store.Set, - Node: &v2store.NodeExtern{ - Key: nodePath, - Value: vp, - Dir: dir, - ModifiedIndex: mkV2Rev(resp.Header.Revision), - CreatedIndex: mkV2Rev(createRev), - }, - PrevNode: pn, - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) Update(nodePath, newValue string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) { - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - - if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { - return nil, errUnsupported - } - - key := s.mkPath(nodePath) - ecode := 0 - applyf := func(stm concurrency.STM) error { - if rev := stm.Rev(key + "/"); rev != 0 { - ecode = v2error.EcodeNotFile - return nil - } - if rev := stm.Rev(key); rev == 0 { - ecode = v2error.EcodeKeyNotFound - return nil - } - stm.Put(key, newValue, clientv3.WithPrevKV()) - stm.Put(s.mkActionKey(), v2store.Update) - return nil - } - - resp, err := s.newSTM(applyf) - if err != nil { - return nil, err - } - if ecode != 0 { - return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) - } - - pkv := prevKeyFromPuts(resp) - return &v2store.Event{ - Action: v2store.Update, - Node: &v2store.NodeExtern{ - Key: nodePath, - Value: &newValue, - ModifiedIndex: mkV2Rev(resp.Header.Revision), - CreatedIndex: mkV2Rev(pkv.CreateRevision), - }, - PrevNode: s.mkV2Node(pkv), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) Create( - nodePath string, - dir bool, - value string, - unique bool, - expireOpts v2store.TTLOptionSet, -) (*v2store.Event, error) { - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { - return nil, errUnsupported - } - ecode := 0 - applyf := func(stm concurrency.STM) error { - ecode = 0 - key := s.mkPath(nodePath) - if unique { - // append unique item under the node path - for { - key = nodePath + "/" + fmt.Sprintf("%020s", time.Now()) - key = path.Clean(path.Join("/", key)) - key = s.mkPath(key) - if stm.Rev(key) == 0 { - break - } - } - } - if stm.Rev(key) > 0 || stm.Rev(key+"/") > 0 { - ecode = v2error.EcodeNodeExist - return nil - } - // build path if any directories in path do not exist - dirs := []string{} - for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) { - pp := s.mkPath(p) - if stm.Rev(pp) > 0 { - ecode = v2error.EcodeNotDir - return nil - } - if stm.Rev(pp+"/") == 0 { - dirs = append(dirs, pp+"/") - } - } - for _, d := range dirs { - stm.Put(d, "") - } - - if dir { - // directories marked with extra slash in key name - key += "/" - } - stm.Put(key, value) - stm.Put(s.mkActionKey(), v2store.Create) - return nil - } - - resp, err := s.newSTM(applyf) - if err != nil { - return nil, err - } - if ecode != 0 { - return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision)) 
- } - - var v *string - if !dir { - v = &value - } - - return &v2store.Event{ - Action: v2store.Create, - Node: &v2store.NodeExtern{ - Key: nodePath, - Value: v, - Dir: dir, - ModifiedIndex: mkV2Rev(resp.Header.Revision), - CreatedIndex: mkV2Rev(resp.Header.Revision), - }, - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) CompareAndSwap( - nodePath string, - prevValue string, - prevIndex uint64, - value string, - expireOpts v2store.TTLOptionSet, -) (*v2store.Event, error) { - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { - return nil, errUnsupported - } - - key := s.mkPath(nodePath) - resp, err := s.c.Txn(s.ctx).If( - s.mkCompare(nodePath, prevValue, prevIndex)..., - ).Then( - clientv3.OpPut(key, value, clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), v2store.CompareAndSwap), - ).Else( - clientv3.OpGet(key), - clientv3.OpGet(key+"/"), - ).Commit() - - if err != nil { - return nil, err - } - if !resp.Succeeded { - return nil, compareFail(nodePath, prevValue, prevIndex, resp) - } - - pkv := resp.Responses[0].GetResponsePut().PrevKv - return &v2store.Event{ - Action: v2store.CompareAndSwap, - Node: &v2store.NodeExtern{ - Key: nodePath, - Value: &value, - CreatedIndex: mkV2Rev(pkv.CreateRevision), - ModifiedIndex: mkV2Rev(resp.Header.Revision), - }, - PrevNode: s.mkV2Node(pkv), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*v2store.Event, error) { - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - if !dir && !recursive { - return s.deleteNode(nodePath) - } - if !recursive { - return s.deleteEmptyDir(nodePath) - } - - dels := make([]clientv3.Op, maxPathDepth+1) - dels[0] = clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()) - for i := 1; i < maxPathDepth; i++ { - dels[i] = clientv3.OpDelete(s.mkPathDepth(nodePath, i), clientv3.WithPrefix()) - } - dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), v2store.Delete) - - resp, err := s.c.Txn(s.ctx).If( - clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), ">", 0), - clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, maxPathDepth)+"/"), "=", 0), - ).Then( - dels..., - ).Commit() - if err != nil { - return nil, err - } - if !resp.Succeeded { - return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision)) - } - dresp := resp.Responses[0].GetResponseDeleteRange() - return &v2store.Event{ - Action: v2store.Delete, - PrevNode: s.mkV2Node(dresp.PrevKvs[0]), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) deleteEmptyDir(nodePath string) (*v2store.Event, error) { - resp, err := s.c.Txn(s.ctx).If( - clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, 1)), "=", 0).WithPrefix(), - ).Then( - clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), v2store.Delete), - ).Commit() - if err != nil { - return nil, err - } - if !resp.Succeeded { - return nil, v2error.NewError(v2error.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision)) - } - dresp := resp.Responses[0].GetResponseDeleteRange() - if len(dresp.PrevKvs) == 0 { - return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision)) - } - return &v2store.Event{ - Action: v2store.Delete, - PrevNode: s.mkV2Node(dresp.PrevKvs[0]), - EtcdIndex: mkV2Rev(resp.Header.Revision), - 
}, nil -} - -func (s *v2v3Store) deleteNode(nodePath string) (*v2store.Event, error) { - resp, err := s.c.Txn(s.ctx).If( - clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), "=", 0), - ).Then( - clientv3.OpDelete(s.mkPath(nodePath), clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), v2store.Delete), - ).Commit() - if err != nil { - return nil, err - } - if !resp.Succeeded { - return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) - } - pkvs := resp.Responses[0].GetResponseDeleteRange().PrevKvs - if len(pkvs) == 0 { - return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) - } - pkv := pkvs[0] - return &v2store.Event{ - Action: v2store.Delete, - Node: &v2store.NodeExtern{ - Key: nodePath, - CreatedIndex: mkV2Rev(pkv.CreateRevision), - ModifiedIndex: mkV2Rev(resp.Header.Revision), - }, - PrevNode: s.mkV2Node(pkv), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*v2store.Event, error) { - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - - key := s.mkPath(nodePath) - resp, err := s.c.Txn(s.ctx).If( - s.mkCompare(nodePath, prevValue, prevIndex)..., - ).Then( - clientv3.OpDelete(key, clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), v2store.CompareAndDelete), - ).Else( - clientv3.OpGet(key), - clientv3.OpGet(key+"/"), - ).Commit() - - if err != nil { - return nil, err - } - if !resp.Succeeded { - return nil, compareFail(nodePath, prevValue, prevIndex, resp) - } - - // len(pkvs) > 1 since txn only succeeds when key exists - pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0] - return &v2store.Event{ - Action: v2store.CompareAndDelete, - Node: &v2store.NodeExtern{ - Key: nodePath, - CreatedIndex: mkV2Rev(pkv.CreateRevision), - ModifiedIndex: mkV2Rev(resp.Header.Revision), - }, - PrevNode: s.mkV2Node(pkv), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.TxnResponse) error { - if dkvs := resp.Responses[1].GetResponseRange().Kvs; len(dkvs) > 0 { - return v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) - } - kvs := resp.Responses[0].GetResponseRange().Kvs - if len(kvs) == 0 { - return v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) - } - kv := kvs[0] - indexMatch := prevIndex == 0 || kv.ModRevision == int64(prevIndex) - valueMatch := prevValue == "" || string(kv.Value) == prevValue - var cause string - switch { - case indexMatch && !valueMatch: - cause = fmt.Sprintf("[%v != %v]", prevValue, string(kv.Value)) - case valueMatch && !indexMatch: - cause = fmt.Sprintf("[%v != %v]", prevIndex, kv.ModRevision) - default: - cause = fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, string(kv.Value), prevIndex, kv.ModRevision) - } - return v2error.NewError(v2error.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision)) -} - -func (s *v2v3Store) mkCompare(nodePath, prevValue string, prevIndex uint64) []clientv3.Cmp { - key := s.mkPath(nodePath) - cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Version(key), ">", 0)} - if prevIndex != 0 { - cmps = append(cmps, clientv3.Compare(clientv3.ModRevision(key), "=", mkV3Rev(prevIndex))) - } - if prevValue != "" { - cmps = append(cmps, clientv3.Compare(clientv3.Value(key), "=", prevValue)) - } - return cmps -} - -func (s *v2v3Store) JsonStats() []byte { panic("STUB") } 
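The CompareAndSwap and CompareAndDelete paths above share one translation rule: every v2 precondition becomes a clientv3 comparison, with Version(key) > 0 standing in for "key exists", a ModRevision compare standing in for the v2 prevIndex (shifted by one via mkV3Rev), and a Value compare standing in for prevValue. A minimal sketch of that pattern against a plain key, outside the v2 emulation (casPut and its arguments are illustrative, not part of the vendored file):

package v2v3sketch

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// casPut applies the same guarded-transaction shape as mkCompare plus
// CompareAndSwap above: all comparisons must hold for the put to happen.
func casPut(ctx context.Context, c *clientv3.Client, key, prevValue, newValue string, prevModRev int64) (bool, error) {
	cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Version(key), ">", 0)} // key must already exist
	if prevModRev != 0 {
		cmps = append(cmps, clientv3.Compare(clientv3.ModRevision(key), "=", prevModRev))
	}
	if prevValue != "" {
		cmps = append(cmps, clientv3.Compare(clientv3.Value(key), "=", prevValue))
	}
	resp, err := c.Txn(ctx).If(cmps...).Then(
		clientv3.OpPut(key, newValue, clientv3.WithPrevKV()),
	).Else(
		clientv3.OpGet(key), // read current state so a failure can be explained, as compareFail does
	).Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}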
-func (s *v2v3Store) DeleteExpiredKeys(cutoff time.Time) { panic("STUB") } - -func (s *v2v3Store) Version() int { return 2 } - -// TODO: move this out of the Store interface? - -func (s *v2v3Store) Save() ([]byte, error) { panic("STUB") } -func (s *v2v3Store) Recovery(state []byte) error { panic("STUB") } -func (s *v2v3Store) Clone() v2store.Store { panic("STUB") } -func (s *v2v3Store) SaveNoCopy() ([]byte, error) { panic("STUB") } -func (s *v2v3Store) HasTTLKeys() bool { panic("STUB") } - -func (s *v2v3Store) mkPath(nodePath string) string { return s.mkPathDepth(nodePath, 0) } - -func (s *v2v3Store) mkNodePath(p string) string { - return path.Clean(p[len(s.pfx)+len("/k/000/"):]) -} - -// mkPathDepth makes a path to a key that encodes its directory depth -// for fast directory listing. If a depth is provided, it is added -// to the computed depth. -func (s *v2v3Store) mkPathDepth(nodePath string, depth int) string { - normalForm := path.Clean(path.Join("/", nodePath)) - n := strings.Count(normalForm, "/") + depth - return fmt.Sprintf("%s/%03d/k/%s", s.pfx, n, normalForm) -} - -func (s *v2v3Store) mkActionKey() string { return s.pfx + "/act" } - -func isRoot(s string) bool { return len(s) == 0 || s == "/" || s == "/0" || s == "/1" } - -func mkV2Rev(v3Rev int64) uint64 { - if v3Rev == 0 { - return 0 - } - return uint64(v3Rev - 1) -} - -func mkV3Rev(v2Rev uint64) int64 { - if v2Rev == 0 { - return 0 - } - return int64(v2Rev + 1) -} - -// mkV2Node creates a V2 NodeExtern from a V3 KeyValue -func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *v2store.NodeExtern { - if kv == nil { - return nil - } - n := &v2store.NodeExtern{ - Key: s.mkNodePath(string(kv.Key)), - Dir: kv.Key[len(kv.Key)-1] == '/', - CreatedIndex: mkV2Rev(kv.CreateRevision), - ModifiedIndex: mkV2Rev(kv.ModRevision), - } - if !n.Dir { - v := string(kv.Value) - n.Value = &v - } - return n -} - -// prevKeyFromPuts gets the prev key that is being put; ignores -// the put action response. -func prevKeyFromPuts(resp *clientv3.TxnResponse) *mvccpb.KeyValue { - for _, r := range resp.Responses { - pkv := r.GetResponsePut().PrevKv - if pkv != nil && pkv.CreateRevision > 0 { - return pkv - } - } - return nil -} - -func (s *v2v3Store) newSTM(applyf func(concurrency.STM) error) (*clientv3.TxnResponse, error) { - return concurrency.NewSTM(s.c, applyf, concurrency.WithIsolation(concurrency.Serializable)) -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/watcher.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/watcher.go deleted file mode 100644 index 046c25d450952..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/watcher.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2v3 - -import ( - "context" - "strings" - - "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" -) - -func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (v2store.Watcher, error) { - ctx, cancel := context.WithCancel(s.ctx) - wch := s.c.Watch( - ctx, - // TODO: very pricey; use a single store-wide watch in future - s.pfx, - clientv3.WithPrefix(), - clientv3.WithRev(int64(sinceIndex)), - clientv3.WithCreatedNotify(), - clientv3.WithPrevKV()) - resp, ok := <-wch - if err := resp.Err(); err != nil || !ok { - cancel() - return nil, v2error.NewError(v2error.EcodeRaftInternal, prefix, 0) - } - - evc, donec := make(chan *v2store.Event), make(chan struct{}) - go func() { - defer func() { - close(evc) - close(donec) - }() - for resp := range wch { - for _, ev := range s.mkV2Events(resp) { - k := ev.Node.Key - if recursive { - if !strings.HasPrefix(k, prefix) { - continue - } - // accept events on hidden keys given in prefix - k = strings.Replace(k, prefix, "/", 1) - // ignore hidden keys deeper than prefix - if strings.Contains(k, "/_") { - continue - } - } - if !recursive && k != prefix { - continue - } - select { - case evc <- ev: - case <-ctx.Done(): - return - } - if !stream { - return - } - } - } - }() - - return &v2v3Watcher{ - startRev: resp.Header.Revision, - evc: evc, - donec: donec, - cancel: cancel, - }, nil -} - -func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs []*v2store.Event) { - ak := s.mkActionKey() - for _, rev := range mkRevs(wr) { - var act, key *clientv3.Event - for _, ev := range rev { - if string(ev.Kv.Key) == ak { - act = ev - } else if key != nil && len(key.Kv.Key) < len(ev.Kv.Key) { - // use longest key to ignore intermediate new - // directories from Create. 
- key = ev - } else if key == nil { - key = ev - } - } - if act != nil && act.Kv != nil && key != nil { - v2ev := &v2store.Event{ - Action: string(act.Kv.Value), - Node: s.mkV2Node(key.Kv), - PrevNode: s.mkV2Node(key.PrevKv), - EtcdIndex: mkV2Rev(wr.Header.Revision), - } - evs = append(evs, v2ev) - } - } - return evs -} - -func mkRevs(wr clientv3.WatchResponse) (revs [][]*clientv3.Event) { - var curRev []*clientv3.Event - for _, ev := range wr.Events { - if curRev != nil && ev.Kv.ModRevision != curRev[0].Kv.ModRevision { - revs = append(revs, curRev) - curRev = nil - } - curRev = append(curRev, ev) - } - if curRev != nil { - revs = append(revs, curRev) - } - return revs -} - -type v2v3Watcher struct { - startRev int64 - evc chan *v2store.Event - donec chan struct{} - cancel context.CancelFunc -} - -func (w *v2v3Watcher) StartIndex() uint64 { return mkV2Rev(w.startRev) } - -func (w *v2v3Watcher) Remove() { - w.cancel() - <-w.donec -} - -func (w *v2v3Watcher) EventChan() chan *v2store.Event { return w.evc } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go index 5d4a641cea000..e0480da081c16 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go @@ -18,18 +18,25 @@ package v3alarm import ( "sync" + "go.uber.org/zap" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" - - "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/storage/backend" ) type BackendGetter interface { Backend() backend.Backend } +type AlarmBackend interface { + CreateAlarmBucket() + MustPutAlarm(member *pb.AlarmMember) + MustDeleteAlarm(alarm *pb.AlarmMember) + GetAllAlarms() ([]*pb.AlarmMember, error) + ForceCommit() +} + type alarmSet map[types.ID]*pb.AlarmMember // AlarmStore persists alarms to the backend. 
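The first alarms.go hunk above replaces the BackendGetter dependency with a narrow AlarmBackend interface: the five listed methods are all AlarmStore needs, which also makes the store testable without a bolt-backed backend. A minimal in-memory sketch satisfying that interface (hypothetical, for illustration only; the real implementation lives in the storage layer):

package v3alarm

import (
	"sync"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)

// memAlarmBackend is a hypothetical in-memory AlarmBackend, e.g. for tests.
type memAlarmBackend struct {
	mu     sync.Mutex
	alarms []*pb.AlarmMember
}

func (b *memAlarmBackend) CreateAlarmBucket() {} // nothing to create in memory

func (b *memAlarmBackend) MustPutAlarm(m *pb.AlarmMember) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.alarms = append(b.alarms, m)
}

func (b *memAlarmBackend) MustDeleteAlarm(m *pb.AlarmMember) {
	b.mu.Lock()
	defer b.mu.Unlock()
	for i, a := range b.alarms {
		if a.MemberID == m.MemberID && a.Alarm == m.Alarm {
			b.alarms = append(b.alarms[:i], b.alarms[i+1:]...)
			return
		}
	}
}

func (b *memAlarmBackend) GetAllAlarms() ([]*pb.AlarmMember, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	return append([]*pb.AlarmMember(nil), b.alarms...), nil // copy to keep callers from mutating state
}

func (b *memAlarmBackend) ForceCommit() {} // no batch transaction to flush

NewAlarmStore(lg, &memAlarmBackend{}) would then restore from an empty alarm set.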
@@ -38,14 +45,14 @@ type AlarmStore struct { mu sync.Mutex types map[pb.AlarmType]alarmSet - bg BackendGetter + be AlarmBackend } -func NewAlarmStore(lg *zap.Logger, bg BackendGetter) (*AlarmStore, error) { +func NewAlarmStore(lg *zap.Logger, be AlarmBackend) (*AlarmStore, error) { if lg == nil { lg = zap.NewNop() } - ret := &AlarmStore{lg: lg, types: make(map[pb.AlarmType]alarmSet), bg: bg} + ret := &AlarmStore{lg: lg, types: make(map[pb.AlarmType]alarmSet), be: be} err := ret.restore() return ret, err } @@ -59,16 +66,7 @@ func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember { return m } - v, err := newAlarm.Marshal() - if err != nil { - a.lg.Panic("failed to marshal alarm member", zap.Error(err)) - } - - b := a.bg.Backend() - b.BatchTx().LockInsideApply() - b.BatchTx().UnsafePut(buckets.Alarm, v, nil) - b.BatchTx().Unlock() - + a.be.MustPutAlarm(newAlarm) return newAlarm } @@ -88,16 +86,7 @@ func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember { delete(t, id) - v, err := m.Marshal() - if err != nil { - a.lg.Panic("failed to marshal alarm member", zap.Error(err)) - } - - b := a.bg.Backend() - b.BatchTx().LockInsideApply() - b.BatchTx().UnsafeDelete(buckets.Alarm, v) - b.BatchTx().Unlock() - + a.be.MustDeleteAlarm(m) return m } @@ -119,22 +108,15 @@ func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) { } func (a *AlarmStore) restore() error { - b := a.bg.Backend() - tx := b.BatchTx() - - tx.LockOutsideApply() - tx.UnsafeCreateBucket(buckets.Alarm) - err := tx.UnsafeForEach(buckets.Alarm, func(k, v []byte) error { - var m pb.AlarmMember - if err := m.Unmarshal(k); err != nil { - return err - } - a.addToMap(&m) - return nil - }) - tx.Unlock() - - b.ForceCommit() + a.be.CreateAlarmBucket() + ms, err := a.be.GetAllAlarms() + if err != nil { + return err + } + for _, m := range ms { + a.addToMap(m) + } + a.be.ForceCommit() return err } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3client/v3client.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3client/v3client.go index de0da81afbd39..b9d18399f2d0d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3client/v3client.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3client/v3client.go @@ -18,7 +18,7 @@ import ( "context" "time" - "go.etcd.io/etcd/client/v3" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" "go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter" diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/compactor.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/compactor.go index e352670c12bb1..f916e71141bf3 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/compactor.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/compactor.go @@ -19,10 +19,10 @@ import ( "fmt" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "github.com/jonboulle/clockwork" "go.uber.org/zap" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) const ( diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go index 083c72ede2432..067f69ec36dca 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go @@ -16,14 +16,15 @@ package v3compactor import ( "context" + "errors" "sync" "time" - pb 
"go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/mvcc" - "github.com/jonboulle/clockwork" "go.uber.org/zap" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/storage/mvcc" ) // Periodic compacts the log by purging revisions older than @@ -54,8 +55,9 @@ func newPeriodic(lg *zap.Logger, clock clockwork.Clock, h time.Duration, rg RevG period: h, rg: rg, c: c, - revs: make([]int64, 0), } + // revs won't be longer than the retentions. + pc.revs = make([]int64, 0, pc.getRetentions()) pc.ctx, pc.cancel = context.WithCancel(context.Background()) return pc } @@ -66,7 +68,7 @@ Compaction period 1-hour: 2. record revisions for every 1/10 of 1-hour (6-minute) 3. keep recording revisions with no compaction for first 1-hour 4. do compact with revs[0] - - success? contiue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? update revs, and retry after 1/10 of 1-hour (6-minute) Compaction period 24-hour: @@ -74,7 +76,7 @@ Compaction period 24-hour: 2. record revisions for every 1/10 of 1-hour (6-minute) 3. keep recording revisions with no compaction for first 24-hour 4. do compact with revs[0] - - success? contiue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? update revs, and retry after 1/10 of 1-hour (6-minute) Compaction period 59-min: @@ -82,7 +84,7 @@ Compaction period 59-min: 2. record revisions for every 1/10 of 59-min (5.9-min) 3. keep recording revisions with no compaction for first 59-min 4. do compact with revs[0] - - success? contiue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? update revs, and retry after 1/10 of 59-min (5.9-min) Compaction period 5-sec: @@ -90,7 +92,7 @@ Compaction period 5-sec: 2. record revisions for every 1/10 of 5-sec (0.5-sec) 3. keep recording revisions with no compaction for first 5-sec 4. do compact with revs[0] - - success? contiue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? 
update revs, and retry after 1/10 of 5-sec (0.5-sec) */ @@ -101,6 +103,7 @@ func (pc *Periodic) Run() { retentions := pc.getRetentions() go func() { + lastRevision := int64(0) lastSuccess := pc.clock.Now() baseInterval := pc.period for { @@ -113,15 +116,15 @@ func (pc *Periodic) Run() { case <-pc.ctx.Done(): return case <-pc.clock.After(retryInterval): - pc.mu.Lock() + pc.mu.RLock() p := pc.paused - pc.mu.Unlock() + pc.mu.RUnlock() if p { continue } } - - if pc.clock.Now().Sub(lastSuccess) < baseInterval { + rev := pc.revs[0] + if pc.clock.Now().Sub(lastSuccess) < baseInterval || rev == lastRevision { continue } @@ -129,7 +132,6 @@ func (pc *Periodic) Run() { if baseInterval == pc.period { baseInterval = compactInterval } - rev := pc.revs[0] pc.lg.Info( "starting auto periodic compaction", @@ -138,13 +140,14 @@ func (pc *Periodic) Run() { ) startTime := pc.clock.Now() _, err := pc.c.Compact(pc.ctx, &pb.CompactionRequest{Revision: rev}) - if err == nil || err == mvcc.ErrCompacted { + if err == nil || errors.Is(err, mvcc.ErrCompacted) { pc.lg.Info( "completed auto periodic compaction", zap.Int64("revision", rev), zap.Duration("compact-period", pc.period), zap.Duration("took", pc.clock.Now().Sub(startTime)), ) + lastRevision = rev lastSuccess = pc.clock.Now() } else { pc.lg.Warn( diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go index 83be62794787b..41748616903d4 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go @@ -16,14 +16,15 @@ package v3compactor import ( "context" + "errors" "sync" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/mvcc" - "github.com/jonboulle/clockwork" "go.uber.org/zap" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/storage/mvcc" ) // Revision compacts the log by purging revisions older than @@ -89,7 +90,7 @@ func (rc *Revision) Run() { zap.Int64("revision-compaction-retention", rc.retention), ) _, err := rc.c.Compact(rc.ctx, &pb.CompactionRequest{Revision: rev}) - if err == nil || err == mvcc.ErrCompacted { + if err == nil || errors.Is(err, mvcc.ErrCompacted) { prev = rev rc.lg.Info( "completed auto revision compaction", diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery/discovery.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery/discovery.go new file mode 100644 index 0000000000000..028876cd1906f --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery/discovery.go @@ -0,0 +1,508 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3discovery provides an implementation of the cluster discovery that +// is used by etcd with v3 client. 
+package v3discovery + +import ( + "context" + "errors" + "math" + "path" + "sort" + "strconv" + "strings" + "time" + + "github.com/jonboulle/clockwork" + "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/types" + clientv3 "go.etcd.io/etcd/client/v3" +) + +const ( + discoveryPrefix = "/_etcd/registry" +) + +var ( + ErrInvalidURL = errors.New("discovery: invalid peer URL") + ErrBadSizeKey = errors.New("discovery: size key is bad") + ErrSizeNotFound = errors.New("discovery: size key not found") + ErrFullCluster = errors.New("discovery: cluster is full") + ErrTooManyRetries = errors.New("discovery: too many retries") +) + +var ( + // Number of retries discovery will attempt before giving up and error out. + nRetries = uint(math.MaxUint32) + maxExponentialRetries = uint(8) +) + +type DiscoveryConfig struct { + clientv3.ConfigSpec `json:"client"` + Token string `json:"token"` +} + +type memberInfo struct { + // peerRegKey is the key used by the member when registering in the + // discovery service. + // Format: "/_etcd/registry/<clusterToken>/members/<memberId>". + peerRegKey string + // peerURLsMap format: "peerName=peerURLs", i.e., "member1=http://127.0.0.1:2380". + peerURLsMap string + // createRev is the member's CreateRevision in the etcd cluster backing + // the discovery service. + createRev int64 +} + +type clusterInfo struct { + clusterToken string + members []memberInfo +} + +// key prefix for each cluster: "/_etcd/registry/<clusterToken>". +func getClusterKeyPrefix(cluster string) string { + return path.Join(discoveryPrefix, cluster) +} + +// key format for cluster size: "/_etcd/registry/<clusterToken>/_config/size". +func getClusterSizeKey(cluster string) string { + return path.Join(getClusterKeyPrefix(cluster), "_config/size") +} + +// key prefix for each member: "/_etcd/registry/<clusterToken>/members". +func getMemberKeyPrefix(clusterToken string) string { + return path.Join(getClusterKeyPrefix(clusterToken), "members") +} + +// key format for each member: "/_etcd/registry/<clusterToken>/members/<memberId>". +func getMemberKey(cluster, memberID string) string { + return path.Join(getMemberKeyPrefix(cluster), memberID) +} + +// GetCluster will connect to the discovery service at the given endpoints and +// retrieve a string describing the cluster +func GetCluster(lg *zap.Logger, cfg *DiscoveryConfig) (cs string, rerr error) { + d, err := newDiscovery(lg, cfg, 0) + if err != nil { + return "", err + } + + defer d.close() + defer func() { + if rerr != nil { + d.lg.Error( + "discovery failed to get cluster", + zap.String("cluster", cs), + zap.Error(rerr), + ) + } else { + d.lg.Info( + "discovery got cluster successfully", + zap.String("cluster", cs), + ) + } + }() + + return d.getCluster() +} + +// JoinCluster will connect to the discovery service at the endpoints, and +// register the server represented by the given id and config to the cluster. +// The parameter `config` is supposed to be in the format "memberName=peerURLs", +// such as "member1=http://127.0.0.1:2380". +// +// The final returned string has the same format as "--initial-cluster", such as +// "infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380". 
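The initial-cluster string described above is only accepted if it parses as a name-to-peer-URL map; getInitClusterStr later in this file returns ErrInvalidURL for anything types.NewURLsMap rejects. A small illustrative check of that format, ahead of the JoinCluster implementation below (names and addresses are made up):

package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/types"
)

func main() {
	// Two well-formed "name=peerURL" pairs joined by commas, the same
	// shape JoinCluster returns for --initial-cluster.
	cs := "infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380"
	m, err := types.NewURLsMap(cs)
	if err != nil {
		fmt.Println("would be rejected with ErrInvalidURL:", err)
		return
	}
	fmt.Println("valid initial cluster with", len(m), "members")
}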
+func JoinCluster(lg *zap.Logger, cfg *DiscoveryConfig, id types.ID, config string) (cs string, rerr error) { + d, err := newDiscovery(lg, cfg, id) + if err != nil { + return "", err + } + + defer d.close() + defer func() { + if rerr != nil { + d.lg.Error( + "discovery failed to join cluster", + zap.String("cluster", cs), + zap.Error(rerr), + ) + } else { + d.lg.Info( + "discovery joined cluster successfully", + zap.String("cluster", cs), + ) + } + }() + + return d.joinCluster(config) +} + +type discovery struct { + lg *zap.Logger + clusterToken string + memberID types.ID + c *clientv3.Client + retries uint + + cfg *DiscoveryConfig + + clock clockwork.Clock +} + +func newDiscovery(lg *zap.Logger, dcfg *DiscoveryConfig, id types.ID) (*discovery, error) { + if lg == nil { + lg = zap.NewNop() + } + + lg = lg.With(zap.String("discovery-token", dcfg.Token), zap.String("discovery-endpoints", strings.Join(dcfg.Endpoints, ","))) + cfg, err := clientv3.NewClientConfig(&dcfg.ConfigSpec, lg) + if err != nil { + return nil, err + } + + c, err := clientv3.New(*cfg) + if err != nil { + return nil, err + } + return &discovery{ + lg: lg, + clusterToken: dcfg.Token, + memberID: id, + c: c, + cfg: dcfg, + clock: clockwork.NewRealClock(), + }, nil +} + +func (d *discovery) getCluster() (string, error) { + cls, clusterSize, rev, err := d.checkCluster() + if err != nil { + if errors.Is(err, ErrFullCluster) { + return cls.getInitClusterStr(clusterSize) + } + return "", err + } + + for cls.Len() < clusterSize { + d.waitPeers(cls, clusterSize, rev) + } + + return cls.getInitClusterStr(clusterSize) +} + +func (d *discovery) joinCluster(config string) (string, error) { + _, _, _, err := d.checkCluster() + if err != nil { + return "", err + } + + if err = d.registerSelf(config); err != nil { + return "", err + } + + cls, clusterSize, rev, err := d.checkCluster() + if err != nil { + return "", err + } + + for cls.Len() < clusterSize { + d.waitPeers(cls, clusterSize, rev) + } + + return cls.getInitClusterStr(clusterSize) +} + +func (d *discovery) getClusterSize() (int, error) { + configKey := getClusterSizeKey(d.clusterToken) + ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout) + defer cancel() + + resp, err := d.c.Get(ctx, configKey) + if err != nil { + d.lg.Warn( + "failed to get cluster size from discovery service", + zap.String("clusterSizeKey", configKey), + zap.Error(err), + ) + return 0, err + } + + if len(resp.Kvs) == 0 { + return 0, ErrSizeNotFound + } + + clusterSize, err := strconv.ParseInt(string(resp.Kvs[0].Value), 10, 0) + if err != nil || clusterSize <= 0 { + return 0, ErrBadSizeKey + } + + return int(clusterSize), nil +} + +func (d *discovery) getClusterMembers() (*clusterInfo, int64, error) { + membersKeyPrefix := getMemberKeyPrefix(d.clusterToken) + ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout) + defer cancel() + + resp, err := d.c.Get(ctx, membersKeyPrefix, clientv3.WithPrefix()) + if err != nil { + d.lg.Warn( + "failed to get cluster members from discovery service", + zap.String("membersKeyPrefix", membersKeyPrefix), + zap.Error(err), + ) + return nil, 0, err + } + + cls := &clusterInfo{clusterToken: d.clusterToken} + for _, kv := range resp.Kvs { + mKey := strings.TrimSpace(string(kv.Key)) + mValue := strings.TrimSpace(string(kv.Value)) + + if err := cls.add(mKey, mValue, kv.CreateRevision); err != nil { + d.lg.Warn( + err.Error(), + zap.String("memberKey", mKey), + zap.String("memberInfo", mValue), + ) + } else { + d.lg.Info( + "found 
peer from discovery service", + zap.String("memberKey", mKey), + zap.String("memberInfo", mValue), + ) + } + } + + return cls, resp.Header.Revision, nil +} + +func (d *discovery) checkClusterRetry() (*clusterInfo, int, int64, error) { + if d.retries < nRetries { + d.logAndBackoffForRetry("cluster status check") + return d.checkCluster() + } + return nil, 0, 0, ErrTooManyRetries +} + +func (d *discovery) checkCluster() (*clusterInfo, int, int64, error) { + clusterSize, err := d.getClusterSize() + if err != nil { + if errors.Is(err, ErrSizeNotFound) || errors.Is(err, ErrBadSizeKey) { + return nil, 0, 0, err + } + + return d.checkClusterRetry() + } + + cls, rev, err := d.getClusterMembers() + if err != nil { + return d.checkClusterRetry() + } + d.retries = 0 + + // find self position + memberSelfID := getMemberKey(d.clusterToken, d.memberID.String()) + idx := 0 + for _, m := range cls.members { + if m.peerRegKey == memberSelfID { + break + } + if idx >= clusterSize-1 { + return cls, clusterSize, rev, ErrFullCluster + } + idx++ + } + return cls, clusterSize, rev, nil +} + +func (d *discovery) registerSelfRetry(contents string) error { + if d.retries < nRetries { + d.logAndBackoffForRetry("register member itself") + return d.registerSelf(contents) + } + return ErrTooManyRetries +} + +func (d *discovery) registerSelf(contents string) error { + ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout) + memberKey := getMemberKey(d.clusterToken, d.memberID.String()) + _, err := d.c.Put(ctx, memberKey, contents) + cancel() + + if err != nil { + d.lg.Warn( + "failed to register members itself to the discovery service", + zap.String("memberKey", memberKey), + zap.Error(err), + ) + return d.registerSelfRetry(contents) + } + d.retries = 0 + + d.lg.Info( + "register member itself successfully", + zap.String("memberKey", memberKey), + zap.String("memberInfo", contents), + ) + + return nil +} + +func (d *discovery) waitPeers(cls *clusterInfo, clusterSize int, rev int64) { + // watch from the next revision + membersKeyPrefix := getMemberKeyPrefix(d.clusterToken) + w := d.c.Watch(context.Background(), membersKeyPrefix, clientv3.WithPrefix(), clientv3.WithRev(rev+1)) + + d.lg.Info( + "waiting for peers from discovery service", + zap.Int("clusterSize", clusterSize), + zap.Int("found-peers", cls.Len()), + ) + + // waiting for peers until all needed peers are returned + for wresp := range w { + for _, ev := range wresp.Events { + mKey := strings.TrimSpace(string(ev.Kv.Key)) + mValue := strings.TrimSpace(string(ev.Kv.Value)) + + if err := cls.add(mKey, mValue, ev.Kv.CreateRevision); err != nil { + d.lg.Warn( + err.Error(), + zap.String("memberKey", mKey), + zap.String("memberInfo", mValue), + ) + } else { + d.lg.Info( + "found peer from discovery service", + zap.String("memberKey", mKey), + zap.String("memberInfo", mValue), + ) + } + } + + if cls.Len() >= clusterSize { + break + } + } + + d.lg.Info( + "found all needed peers from discovery service", + zap.Int("clusterSize", clusterSize), + zap.Int("found-peers", cls.Len()), + ) +} + +func (d *discovery) logAndBackoffForRetry(step string) { + d.retries++ + // logAndBackoffForRetry stops exponential backoff when the retries are + // more than maxExpoentialRetries and is set to a constant backoff afterward. 
+ retries := d.retries + if retries > maxExponentialRetries { + retries = maxExponentialRetries + } + retryTimeInSecond := time.Duration(0x1<<retries) * time.Second + d.lg.Info( + "retry connecting to discovery service", + zap.String("reason", step), + zap.Duration("backoff", retryTimeInSecond), + ) + d.clock.Sleep(retryTimeInSecond) +} + +func (d *discovery) close() error { + if d.c != nil { + return d.c.Close() + } + return nil +} + +func (cls *clusterInfo) Len() int { return len(cls.members) } + +func (cls *clusterInfo) getInitClusterStr(clusterSize int) (string, error) { + peerURLs := cls.getPeerURLs() + + if len(peerURLs) > clusterSize { + peerURLs = peerURLs[:clusterSize] + } + + us := strings.Join(peerURLs, ",") + _, err := types.NewURLsMap(us) + if err != nil { + return us, ErrInvalidURL + } + + return us, nil +} + +func (cls *clusterInfo) getPeerURLs() []string { + var peerURLs []string + for _, peer := range cls.members { + peerURLs = append(peerURLs, peer.peerURLsMap) + } + return peerURLs +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/election.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/election.go index 78b26ad7cc924..77a9c4bcb42c8 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/election.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/election.go @@ -18,7 +18,7 @@ import ( "context" "errors" - "go.etcd.io/etcd/client/v3" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" ) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go index 4a6e5e724cc83..9c0ae68d0202c 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go @@ -9,19 +9,21 @@ It translates gRPC into RESTful JSON APIs. package gw import ( + protov1 "github.com/golang/protobuf/proto" + "context" "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" "io" "net/http" - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // Suppress "imported and not used" errors @@ -30,22 +32,18 @@ var _ io.Reader var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage +var _ = metadata.Join func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq v3electionpb.CampaignRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -53,16 +51,12 @@ func local_request_Election_Campaign_0(ctx context.Context, marshaler runtime.Ma var protoReq v3electionpb.CampaignRequest var metadata 
runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Campaign(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -70,16 +64,12 @@ func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshale var protoReq v3electionpb.ProclaimRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -87,16 +77,12 @@ func local_request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Ma var protoReq v3electionpb.ProclaimRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Proclaim(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -104,16 +90,12 @@ func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq v3electionpb.LeaderRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -121,16 +103,12 @@ func local_request_Election_Leader_0(ctx context.Context, marshaler runtime.Mars var protoReq v3electionpb.LeaderRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Leader(ctx, &protoReq) - 
return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -138,11 +116,7 @@ func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler var protoReq v3electionpb.LeaderRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -163,16 +137,12 @@ func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq v3electionpb.ResignRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -180,81 +150,94 @@ func local_request_Election_Resign_0(ctx context.Context, marshaler runtime.Mars var protoReq v3electionpb.ResignRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Resign(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } // v3electionpb.RegisterElectionHandlerServer registers the http handlers for service Election to "mux". // UnaryRPC :call v3electionpb.ElectionServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterElectionHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, server v3electionpb.ElectionServer) error { mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Campaign", runtime.WithHTTPPathPattern("/v3/election/campaign")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Election_Campaign_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Election_Campaign_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Campaign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Proclaim", runtime.WithHTTPPathPattern("/v3/election/proclaim")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Election_Proclaim_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Election_Proclaim_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Proclaim_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Leader", runtime.WithHTTPPathPattern("/v3/election/leader")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Election_Leader_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Election_Leader_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Leader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -268,20 +251,25 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Resign", runtime.WithHTTPPathPattern("/v3/election/resign")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Election_Resign_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Election_Resign_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Resign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -291,21 +279,21 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s // RegisterElectionHandlerFromEndpoint is same as RegisterElectionHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() @@ -323,26 +311,28 @@ func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *g // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "ElectionClient" to call the correct interceptors. +// "ElectionClient" to call the correct interceptors. This client ignores the HTTP middlewares. func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error { mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Campaign", runtime.WithHTTPPathPattern("/v3/election/campaign")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Election_Campaign_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Campaign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -350,19 +340,21 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Proclaim", runtime.WithHTTPPathPattern("/v3/election/proclaim")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Election_Proclaim_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Proclaim_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -370,19 +362,21 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Leader", runtime.WithHTTPPathPattern("/v3/election/leader")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Election_Leader_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Leader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -390,19 +384,24 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Observe", runtime.WithHTTPPathPattern("/v3/election/observe")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Election_Observe_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_Election_Observe_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { + m1, err := resp.Recv() + return protov1.MessageV2(m1), err + }, mux.GetForwardResponseOptions()...) }) @@ -410,19 +409,21 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Resign", runtime.WithHTTPPathPattern("/v3/election/resign")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Election_Resign_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Resign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -430,15 +431,15 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c } var ( - pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "campaign"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "campaign"}, "")) - pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "proclaim"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "proclaim"}, "")) - pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "leader"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "leader"}, "")) - pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "observe"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "observe"}, "")) - pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "resign"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "resign"}, "")) ) var ( diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.pb.go index 81cf59b9b25e1..4a022f1b6281a 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.pb.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.pb.go @@ -552,41 +552,42 @@ func init() { func init() { proto.RegisterFile("v3election.proto", fileDescriptor_c9b1f26cc432a035) } var fileDescriptor_c9b1f26cc432a035 = []byte{ - // 531 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0x59, 0x27, 0x84, 0x32, 0xa4, 0xad, 0x65, 0x82, 0x08, 0x21, 0xb8, 0xd1, 0x72, 0xa9, - 0x72, 0xb0, 0x51, 0xc3, 0x29, 0x27, 0x04, 0x02, 0x55, 0x2a, 0x12, 0xe0, 0x03, 0x82, 0xe3, 0xda, - 0x1d, 0xb9, 0x91, 0x1d, 0xaf, 0xb1, 0x5d, 0x4b, 0xb9, 0xf2, 0x0a, 0x1c, 0xe0, 0x91, 0x38, 0x22, - 0xf1, 0x02, 0x28, 0xf0, 0x20, 0x68, 0x77, 0xed, 0xfa, 0x8f, 0x12, 0x84, 0x9a, 0xdb, 0x78, 0xe7, - 0xdb, 0xf9, 0xcd, 0x37, 0x3b, 0x09, 0xe8, 0xf9, 0x0c, 0x43, 0xf4, 0xb2, 0x05, 0x8f, 0xac, 0x38, - 0xe1, 0x19, 0x37, 0xfa, 0xd5, 0x49, 0xec, 0x8e, 0x06, 0x3e, 0xf7, 0xb9, 0x4c, 0xd8, 0x22, 0x52, - 0x9a, 0xd1, 0x11, 0x66, 0xde, 0xb9, 0xcd, 0xe2, 0x85, 0x2d, 0x82, 0x14, 0x93, 0x1c, 0x93, 0xd8, - 0xb5, 0x93, 0xd8, 0x2b, 0x04, 0xc3, 0x2b, 0xc1, 0x32, 0xf7, 0xbc, 0xd8, 0xb5, 0x83, 0xbc, 0xc8, - 0x8c, 0x7d, 0xce, 0xfd, 0x10, 0x65, 0x8e, 0x45, 0x11, 0xcf, 0x98, 0x20, 0xa5, 0x2a, 0x4b, 0xdf, - 0xc1, 0xe1, 0x0b, 0xb6, 0x8c, 0xd9, 0xc2, 0x8f, 0x1c, 0xfc, 0x74, 0x89, 0x69, 0x66, 0x18, 0xd0, - 0x8d, 0xd8, 0x12, 0x87, 0x64, 0x42, 0x8e, 0xfb, 0x8e, 0x8c, 0x8d, 0x01, 0xdc, 0x0c, 0x91, 0xa5, - 0x38, 0xd4, 0x26, 
0xe4, 0xb8, 0xe3, 0xa8, 0x0f, 0x71, 0x9a, 0xb3, 0xf0, 0x12, 0x87, 0x1d, 0x29, - 0x55, 0x1f, 0x74, 0x05, 0x7a, 0x55, 0x32, 0x8d, 0x79, 0x94, 0xa2, 0xf1, 0x14, 0x7a, 0x17, 0xc8, - 0xce, 0x31, 0x91, 0x55, 0xef, 0x9c, 0x8c, 0xad, 0xba, 0x0f, 0xab, 0xd4, 0x9d, 0x4a, 0x8d, 0x53, - 0x68, 0x0d, 0x1b, 0x7a, 0xa1, 0xba, 0xa5, 0xc9, 0x5b, 0xf7, 0xad, 0xfa, 0xa8, 0xac, 0xd7, 0x32, - 0x77, 0x86, 0x2b, 0xa7, 0x90, 0xd1, 0x8f, 0x70, 0xfb, 0xea, 0x70, 0xa3, 0x0f, 0x1d, 0x3a, 0x01, - 0xae, 0x64, 0xb9, 0xbe, 0x23, 0x42, 0x71, 0x92, 0x60, 0x2e, 0x1d, 0x74, 0x1c, 0x11, 0x56, 0x5e, - 0xbb, 0x35, 0xaf, 0xf4, 0x31, 0xec, 0xab, 0xd2, 0xff, 0x18, 0x13, 0xbd, 0x80, 0x83, 0x52, 0xb4, - 0x93, 0xf1, 0x09, 0x68, 0x41, 0x5e, 0x98, 0xd6, 0x2d, 0xf5, 0xa2, 0xd6, 0x19, 0xae, 0xde, 0x8b, - 0x01, 0x3b, 0x5a, 0x90, 0xd3, 0x67, 0xb0, 0xef, 0x60, 0x5a, 0x7b, 0xb5, 0x6a, 0x56, 0xe4, 0xff, - 0x66, 0xf5, 0x0a, 0x0e, 0xca, 0x0a, 0xbb, 0xf4, 0x4a, 0x3f, 0xc0, 0xe1, 0xdb, 0x84, 0x7b, 0x21, - 0x5b, 0x2c, 0xaf, 0xdb, 0x4b, 0xb5, 0x48, 0x5a, 0x7d, 0x91, 0x4e, 0x41, 0xaf, 0x2a, 0xef, 0xd2, - 0xe3, 0xc9, 0xd7, 0x2e, 0xec, 0xbd, 0x2c, 0x1a, 0x30, 0x02, 0xd8, 0x2b, 0xf7, 0xd3, 0x78, 0xd4, - 0xec, 0xac, 0xf5, 0x53, 0x18, 0x99, 0xdb, 0xd2, 0x8a, 0x42, 0x27, 0x9f, 0x7f, 0xfe, 0xf9, 0xa2, - 0x8d, 0xe8, 0x3d, 0x3b, 0x9f, 0xd9, 0xa5, 0xd0, 0xf6, 0x0a, 0xd9, 0x9c, 0x4c, 0x05, 0xac, 0xf4, - 0xd0, 0x86, 0xb5, 0xa6, 0xd6, 0x86, 0xb5, 0xad, 0x6f, 0x81, 0xc5, 0x85, 0x4c, 0xc0, 0x3c, 0xe8, - 0xa9, 0xd9, 0x1a, 0x0f, 0x37, 0x4d, 0xbc, 0x04, 0x8d, 0x37, 0x27, 0x0b, 0x8c, 0x29, 0x31, 0x43, - 0x7a, 0xb7, 0x81, 0x51, 0x0f, 0x25, 0x20, 0x3e, 0xdc, 0x7a, 0xe3, 0xca, 0x81, 0xef, 0x42, 0x39, - 0x92, 0x94, 0x07, 0x74, 0xd0, 0xa0, 0x70, 0x55, 0x78, 0x4e, 0xa6, 0x4f, 0x88, 0x70, 0xa3, 0x16, - 0xb4, 0xcd, 0x69, 0x2c, 0x7e, 0x9b, 0xd3, 0xdc, 0xe9, 0x2d, 0x6e, 0x12, 0x29, 0x9a, 0x93, 0xe9, - 0x73, 0xfd, 0xfb, 0xda, 0x24, 0x3f, 0xd6, 0x26, 0xf9, 0xb5, 0x36, 0xc9, 0xb7, 0xdf, 0xe6, 0x0d, - 0xb7, 0x27, 0xff, 0x18, 0x67, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xe6, 0x7c, 0x66, 0xa9, - 0x05, 0x00, 0x00, + // 556 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0x41, 0x6f, 0xd3, 0x30, + 0x14, 0xc7, 0x71, 0x5a, 0xca, 0x78, 0x74, 0x5b, 0x15, 0x8a, 0x28, 0xa5, 0x64, 0x95, 0xb9, 0x4c, + 0x3d, 0xc4, 0x68, 0xe5, 0xd4, 0xd3, 0x04, 0x02, 0x4d, 0x1a, 0x12, 0xe0, 0x03, 0x02, 0x6e, 0x6e, + 0xf6, 0x94, 0x55, 0x4d, 0xe3, 0x90, 0x74, 0x91, 0x7a, 0xe5, 0x2b, 0x70, 0x80, 0x8f, 0xc4, 0x11, + 0x89, 0x2f, 0x80, 0x0a, 0x1f, 0x04, 0xd9, 0x4e, 0x9a, 0x34, 0x6a, 0x11, 0x5a, 0x6f, 0x8e, 0xdf, + 0xdf, 0xef, 0xf7, 0xfe, 0xcf, 0x2f, 0x86, 0x56, 0x3a, 0xc4, 0x00, 0xbd, 0xf9, 0x44, 0x86, 0x6e, + 0x14, 0xcb, 0xb9, 0xb4, 0x9b, 0xc5, 0x4e, 0x34, 0xee, 0xb6, 0x7d, 0xe9, 0x4b, 0x1d, 0x60, 0x6a, + 0x65, 0x34, 0xdd, 0x23, 0x9c, 0x7b, 0x17, 0x4c, 0x44, 0x13, 0xa6, 0x16, 0x09, 0xc6, 0x29, 0xc6, + 0xd1, 0x98, 0xc5, 0x91, 0x97, 0x09, 0x3a, 0x2b, 0xc1, 0x2c, 0xf5, 0xbc, 0x68, 0xcc, 0xa6, 0x69, + 0x16, 0xe9, 0xf9, 0x52, 0xfa, 0x01, 0xea, 0x98, 0x08, 0x43, 0x39, 0x17, 0x8a, 0x94, 0x98, 0x28, + 0x7d, 0x0b, 0x87, 0xcf, 0xc5, 0x2c, 0x12, 0x13, 0x3f, 0xe4, 0xf8, 0xe9, 0x0a, 0x93, 0xb9, 0x6d, + 0x43, 0x3d, 0x14, 0x33, 0xec, 0x90, 0x3e, 0x39, 0x6e, 0x72, 0xbd, 0xb6, 0xdb, 0x70, 0x33, 0x40, + 0x91, 0x60, 0xc7, 0xea, 0x93, 0xe3, 0x1a, 0x37, 0x1f, 0x6a, 0x37, 0x15, 0xc1, 0x15, 0x76, 0x6a, + 0x5a, 0x6a, 0x3e, 0xe8, 0x02, 0x5a, 0x45, 0xca, 0x24, 0x92, 0x61, 0x82, 0xf6, 0x53, 0x68, 0x5c, + 0xa2, 0xb8, 0xc0, 0x58, 0x67, 0xbd, 0x73, 0xd2, 0x73, 0xcb, 0x3e, 0xdc, 
0x5c, 0x77, 0xa6, 0x35, + 0x3c, 0xd3, 0xda, 0x0c, 0x1a, 0x81, 0x39, 0x65, 0xe9, 0x53, 0xf7, 0xdd, 0x72, 0xab, 0xdc, 0x57, + 0x3a, 0x76, 0x8e, 0x0b, 0x9e, 0xc9, 0xe8, 0x07, 0xb8, 0xbd, 0xda, 0xdc, 0xe8, 0xa3, 0x05, 0xb5, + 0x29, 0x2e, 0x74, 0xba, 0x26, 0x57, 0x4b, 0xb5, 0x13, 0x63, 0xaa, 0x1d, 0xd4, 0xb8, 0x5a, 0x16, + 0x5e, 0xeb, 0x25, 0xaf, 0xf4, 0x31, 0xec, 0x9b, 0xd4, 0xff, 0x68, 0x13, 0xbd, 0x84, 0x83, 0x5c, + 0xb4, 0x93, 0xf1, 0x3e, 0x58, 0xd3, 0x34, 0x33, 0xdd, 0x72, 0xcd, 0x8d, 0xba, 0xe7, 0xb8, 0x78, + 0xa7, 0x1a, 0xcc, 0xad, 0x69, 0x4a, 0x4f, 0x61, 0x9f, 0x63, 0x52, 0xba, 0xb5, 0xa2, 0x57, 0xe4, + 0xff, 0x7a, 0xf5, 0x12, 0x0e, 0xf2, 0x0c, 0xbb, 0xd4, 0x4a, 0xdf, 0xc3, 0xe1, 0x9b, 0x58, 0x7a, + 0x81, 0x98, 0xcc, 0xae, 0x5b, 0x4b, 0x31, 0x48, 0x56, 0x79, 0x90, 0xce, 0xa0, 0x55, 0x64, 0xde, + 0xa5, 0xc6, 0x93, 0xaf, 0x75, 0xd8, 0x7b, 0x91, 0x15, 0x60, 0x4f, 0x61, 0x2f, 0x9f, 0x4f, 0xfb, + 0xd1, 0x7a, 0x65, 0x95, 0x5f, 0xa1, 0xeb, 0x6c, 0x0b, 0x1b, 0x0a, 0xed, 0x7f, 0xfe, 0xf9, 0xe7, + 0x8b, 0xd5, 0xa5, 0xf7, 0x58, 0x3a, 0x64, 0xb9, 0x90, 0x79, 0x99, 0x6c, 0x44, 0x06, 0x0a, 0x96, + 0x7b, 0xa8, 0xc2, 0x2a, 0x5d, 0xab, 0xc2, 0xaa, 0xd6, 0xb7, 0xc0, 0xa2, 0x4c, 0xa6, 0x60, 0x1e, + 0x34, 0x4c, 0x6f, 0xed, 0x87, 0x9b, 0x3a, 0x9e, 0x83, 0x7a, 0x9b, 0x83, 0x19, 0xc6, 0xd1, 0x98, + 0x0e, 0xbd, 0xbb, 0x86, 0x31, 0x17, 0xa5, 0x20, 0x3e, 0xdc, 0x7a, 0x3d, 0xd6, 0x0d, 0xdf, 0x85, + 0x72, 0xa4, 0x29, 0x0f, 0x68, 0x7b, 0x8d, 0x22, 0x4d, 0xe2, 0x11, 0x19, 0x3c, 0x21, 0xca, 0x8d, + 0x19, 0xd0, 0x2a, 0x67, 0x6d, 0xf0, 0xab, 0x9c, 0xf5, 0x99, 0xde, 0xe2, 0x26, 0xd6, 0xa2, 0x11, + 0x19, 0x3c, 0xe3, 0xdf, 0x97, 0x0e, 0xf9, 0xb1, 0x74, 0xc8, 0xaf, 0xa5, 0x43, 0xbe, 0xfd, 0x76, + 0x6e, 0x7c, 0x3c, 0xf5, 0xa5, 0x9e, 0x29, 0x77, 0x22, 0xf5, 0x63, 0xcb, 0xcc, 0x70, 0xe9, 0xf3, + 0xab, 0x51, 0xd3, 0xaf, 0x69, 0xc1, 0x65, 0xe5, 0x12, 0xc6, 0x0d, 0xfd, 0xb4, 0x0e, 0xff, 0x06, + 0x00, 0x00, 0xff, 0xff, 0xcd, 0x58, 0x82, 0xe2, 0xeb, 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
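The grpc-gateway v1-to-v2 migration visible in the election handlers above follows one pattern throughout: runtime.AnnotateContext now takes the full gRPC method name plus a runtime.WithHTTPPathPattern option, runtime.NewPattern drops the old AssumeColonVerbOpt argument, and gogo/protobuf-generated messages are bridged to the google.golang.org/protobuf API with protov1.MessageV2 before the v2 marshalers see them — the Observe forwarder does exactly this inside its Recv closure. A minimal sketch of that bridge, using wrapperspb as a stand-in message rather than anything from this diff:

    package main

    import (
        "fmt"

        protov1 "github.com/golang/protobuf/proto"
        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        // Generated messages still implement the legacy proto.Message
        // interface (Reset/String/ProtoMessage), which is all that
        // protov1.MessageV2 requires.
        var legacy protov1.Message = wrapperspb.String("leader-key")

        // MessageV2 adapts the legacy message to the proto.Message
        // interface of google.golang.org/protobuf, the form expected by
        // the grpc-gateway v2 runtime and its marshalers.
        v2 := protov1.MessageV2(legacy)

        b, err := proto.Marshal(v2)
        if err != nil {
            panic(err)
        }
        fmt.Printf("marshaled %d bytes\n", len(b))
    }

The same conversion appears in every request_*/local_request_* helper in these regenerated files: decode into the gogo struct, then hand protov1.MessageV2(&protoReq) to the v2 decoder and return protov1.MessageV2(msg) to the forwarder.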
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.proto index 24ccaf138d896..0bcaad875782c 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.proto +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.proto @@ -8,6 +8,8 @@ import "etcd/api/mvccpb/kv.proto"; // for grpc-gateway import "google/api/annotations.proto"; +option go_package = "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"; + option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/lock.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/lock.go index 8f9623361c78b..c8ef56ebaebb1 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/lock.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/lock.go @@ -17,7 +17,7 @@ package v3lock import ( "context" - "go.etcd.io/etcd/client/v3" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/concurrency" "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" ) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go index dc573d79cf0f1..2437489ee16d2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go @@ -9,19 +9,21 @@ It translates gRPC into RESTful JSON APIs. package gw import ( + protov1 "github.com/golang/protobuf/proto" + "context" "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" "io" "net/http" - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // Suppress "imported and not used" errors @@ -30,22 +32,18 @@ var _ io.Reader var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage +var _ = metadata.Join func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq v3lockpb.LockRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -53,16 +51,12 @@ func local_request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, 
var protoReq v3lockpb.LockRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Lock(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -70,16 +64,12 @@ func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, cli var protoReq v3lockpb.UnlockRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } @@ -87,61 +77,69 @@ func local_request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshale var protoReq v3lockpb.UnlockRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := server.Unlock(ctx, &protoReq) - return msg, metadata, err + return protov1.MessageV2(msg), metadata, err } // v3lockpb.RegisterLockHandlerServer registers the http handlers for service Lock to "mux". // UnaryRPC :call v3lockpb.LockServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLockHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, server v3lockpb.LockServer) error { mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v3lockpb.Lock/Lock", runtime.WithHTTPPathPattern("/v3/lock/lock")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Lock_Lock_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Lock_Lock_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lock_Lock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v3lockpb.Lock/Unlock", runtime.WithHTTPPathPattern("/v3/lock/unlock")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Lock_Unlock_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := local_request_Lock_Unlock_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lock_Unlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -151,21 +149,21 @@ func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve // RegisterLockHandlerFromEndpoint is same as RegisterLockHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() @@ -183,26 +181,28 @@ func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LockClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "LockClient" to call the correct interceptors. +// "LockClient" to call the correct interceptors. This client ignores the HTTP middlewares. func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error { mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v3lockpb.Lock/Lock", runtime.WithHTTPPathPattern("/v3/lock/lock")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Lock_Lock_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lock_Lock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -210,19 +210,21 @@ func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v3lockpb.Lock/Unlock", runtime.WithHTTPPathPattern("/v3/lock/unlock")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Lock_Unlock_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lock_Unlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -230,9 +232,9 @@ func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } var ( - pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3", "lock"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3", "lock"}, "")) - pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lock", "unlock"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lock", "unlock"}, "")) ) var ( diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go index 4282ddc85e04e..39c04abe179b3 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go @@ -254,28 +254,30 @@ func init() { func init() { proto.RegisterFile("v3lock.proto", fileDescriptor_52389b3e2f253201) } var fileDescriptor_52389b3e2f253201 = []byte{ - // 330 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9, - 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44, - 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x3e, 0xb5, 0x24, 0x39, - 0x45, 0x3f, 0xb1, 0x20, 0x53, 0x1f, 0xc4, 0x28, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0x2a, 0x48, 0xd2, - 0x2f, 0x2a, 0x48, 0x86, 0x2a, 0x90, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x2b, 0x49, 0xcc, - 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x86, 0xc8, 0x2a, 0x99, 0x73, 0x71, 0xfb, - 0xe4, 0x27, 0x67, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x09, 0x71, 0xb1, 0xe4, 0x25, - 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x42, 0x22, 0x5c, 0xac, 0x39, - 0xa9, 0x89, 0xc5, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x10, 0x8e, 0x52, 0x18, 0x17, - 0x0f, 0x44, 0x63, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x90, 0x09, 0x17, 0x5b, 0x46, 0x6a, 0x62, - 0x4a, 0x6a, 
0x11, 0x58, 0x2f, 0xb7, 0x91, 0x8c, 0x1e, 0xb2, 0x7b, 0xf4, 0x60, 0xea, 0x3c, 0xc0, - 0x6a, 0x82, 0xa0, 0x6a, 0x85, 0x04, 0xb8, 0x98, 0xb3, 0x53, 0x2b, 0xc1, 0x26, 0xf3, 0x04, 0x81, - 0x98, 0x4a, 0x8a, 0x5c, 0xbc, 0xa1, 0x79, 0x39, 0x48, 0x4e, 0x82, 0x2a, 0x61, 0x44, 0x28, 0x71, - 0xe3, 0xe2, 0x83, 0x29, 0xa1, 0xc4, 0x72, 0xa3, 0x0d, 0x8c, 0x5c, 0x2c, 0x20, 0x3f, 0x08, 0xf9, - 0x43, 0x69, 0x51, 0x3d, 0x58, 0x60, 0xeb, 0x21, 0x05, 0x8a, 0x94, 0x18, 0xba, 0x30, 0xc4, 0x34, - 0x25, 0x89, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0x09, 0x29, 0xf1, 0xea, 0x97, 0x19, 0xeb, 0x83, 0x14, - 0x80, 0x09, 0x2b, 0x46, 0x2d, 0xa1, 0x70, 0x2e, 0x36, 0x88, 0x0b, 0x85, 0xc4, 0x11, 0x7a, 0x51, - 0xbc, 0x25, 0x25, 0x81, 0x29, 0x01, 0x35, 0x56, 0x0a, 0x6c, 0xac, 0x88, 0x12, 0x3f, 0xdc, 0xd8, - 0xd2, 0x3c, 0xa8, 0xc1, 0x4e, 0x02, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, - 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0, 0x78, 0x34, 0x06, 0x04, 0x00, 0x00, - 0xff, 0xff, 0x4a, 0x4d, 0xca, 0xbb, 0x36, 0x02, 0x00, 0x00, + // 356 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0xcd, 0x4a, 0xc3, 0x40, + 0x10, 0xc7, 0xdd, 0xb6, 0x16, 0xd9, 0xa6, 0x2a, 0x4b, 0xd5, 0x10, 0x4a, 0xac, 0x39, 0x15, 0x0f, + 0x59, 0x68, 0x05, 0xc5, 0xa3, 0x07, 0x11, 0x11, 0x84, 0x80, 0x0a, 0xde, 0xd2, 0x74, 0x88, 0xa5, + 0x71, 0x27, 0x26, 0x69, 0xc1, 0xab, 0xaf, 0xe0, 0xc5, 0xc7, 0xf0, 0x31, 0x3c, 0x0a, 0xbe, 0x80, + 0x54, 0x1f, 0x44, 0x32, 0x9b, 0xd8, 0xaa, 0x47, 0x2f, 0xc9, 0xec, 0xce, 0x6f, 0xfe, 0xf3, 0xb1, + 0xc3, 0x8d, 0x69, 0x3f, 0xc2, 0x60, 0xec, 0xc6, 0x09, 0x66, 0x28, 0x56, 0xf4, 0x29, 0x1e, 0x58, + 0xad, 0x10, 0x43, 0xa4, 0x4b, 0x99, 0x5b, 0xda, 0x6f, 0x6d, 0x43, 0x16, 0x0c, 0xa5, 0x1f, 0x8f, + 0x64, 0x6e, 0xa4, 0x90, 0x4c, 0x21, 0x89, 0x07, 0x32, 0x89, 0x83, 0x02, 0x68, 0x87, 0x88, 0x61, + 0x04, 0x84, 0xf8, 0x4a, 0x61, 0xe6, 0x67, 0x23, 0x54, 0xa9, 0xf6, 0x3a, 0xfb, 0xbc, 0x71, 0x86, + 0xc1, 0xd8, 0x83, 0xbb, 0x09, 0xa4, 0x99, 0x10, 0xbc, 0xa6, 0xfc, 0x5b, 0x30, 0x59, 0x87, 0x75, + 0x0d, 0x8f, 0x6c, 0xd1, 0xe2, 0xcb, 0x11, 0xf8, 0x29, 0x98, 0x95, 0x0e, 0xeb, 0x56, 0x3d, 0x7d, + 0x70, 0x2e, 0xb9, 0xa1, 0x03, 0xd3, 0x18, 0x55, 0x0a, 0x62, 0x8f, 0xd7, 0x6f, 0xc0, 0x1f, 0x42, + 0x42, 0xb1, 0x8d, 0x5e, 0xdb, 0x5d, 0xac, 0xc7, 0x2d, 0xb9, 0x13, 0x62, 0xbc, 0x82, 0x15, 0xeb, + 0xbc, 0x3a, 0x86, 0x7b, 0x52, 0x36, 0xbc, 0xdc, 0x74, 0x76, 0x78, 0xf3, 0x42, 0x45, 0x0b, 0x25, + 0x15, 0x08, 0x9b, 0x23, 0xc7, 0x7c, 0xb5, 0x44, 0xfe, 0x93, 0xbc, 0xf7, 0xcc, 0x78, 0x2d, 0xef, + 0x41, 0x9c, 0x17, 0xff, 0x0d, 0xb7, 0x1c, 0xb6, 0xbb, 0x30, 0x14, 0x6b, 0xf3, 0xf7, 0xb5, 0x56, + 0x73, 0xcc, 0x87, 0xb7, 0xcf, 0xc7, 0x8a, 0x70, 0x9a, 0x72, 0xda, 0x97, 0x39, 0x40, 0x9f, 0x43, + 0xb6, 0x2b, 0xae, 0x78, 0x5d, 0x57, 0x28, 0xb6, 0xe6, 0xb1, 0x3f, 0xda, 0xb2, 0xcc, 0xbf, 0x8e, + 0x42, 0xd6, 0x22, 0xd9, 0x96, 0xb3, 0xf6, 0x2d, 0x3b, 0x51, 0x85, 0xf0, 0xd1, 0xe9, 0xcb, 0xcc, + 0x66, 0xaf, 0x33, 0x9b, 0xbd, 0xcf, 0x6c, 0xf6, 0xf4, 0x61, 0x2f, 0x5d, 0x1f, 0x84, 0x48, 0xcd, + 0xba, 0x23, 0xa4, 0x0d, 0x90, 0xba, 0xeb, 0x3c, 0x76, 0x3e, 0x03, 0x7a, 0x7c, 0x9d, 0x4f, 0x96, + 0x69, 0x07, 0x75, 0xda, 0x80, 0xfe, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x48, 0x31, 0x4a, + 0x70, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
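One behavioral change worth noting in the regenerated lock gateway: RegisterLockHandlerFromEndpoint now builds its connection with grpc.NewClient rather than the deprecated grpc.Dial, and close failures are logged at error level instead of info. Unlike Dial, NewClient performs no network I/O; it validates the options, returns a lazy ClientConn, and defers actual connection establishment to the first RPC (or an explicit Connect call), so an unreachable endpoint surfaces as an RPC error rather than at construction time. A short sketch under those assumptions — the endpoint and credentials here are illustrative only:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // grpc.NewClient only validates configuration and builds a lazy
        // ClientConn; no connection is attempted yet.
        conn, err := grpc.NewClient("localhost:2379",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatalf("invalid client config: %v", err)
        }
        defer func() {
            if cerr := conn.Close(); cerr != nil {
                // Matching the diff: close failures now log as errors.
                log.Printf("failed to close conn: %v", cerr)
            }
        }()

        conn.Connect() // optionally kick off connection establishment eagerly
    }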
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.proto index 1b5c456ae2322..88a1c8242849c 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.proto +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.proto @@ -7,6 +7,8 @@ import "etcd/api/etcdserverpb/rpc.proto"; // for grpc-gateway import "google/api/annotations.proto"; +option go_package = "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"; + option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/auth.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/auth.go index d986037a1b4ca..6c5db76cb8e3f 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/auth.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/auth.go @@ -18,6 +18,7 @@ import ( "context" pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" ) @@ -164,3 +165,23 @@ func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChan } return resp, nil } + +type AuthGetter interface { + AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) + AuthStore() auth.AuthStore +} + +type AuthAdmin struct { + ag AuthGetter +} + +// isPermitted verifies the user has admin privilege. +// Only users with "root" role are permitted. +func (aa *AuthAdmin) isPermitted(ctx context.Context) error { + authInfo, err := aa.ag.AuthInfoFromCtx(ctx) + if err != nil { + return err + } + + return aa.ag.AuthStore().IsAdminPermitted(authInfo) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/codec.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/codec.go index d599ff63cc3ad..1bbed83971249 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/codec.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/codec.go @@ -18,13 +18,13 @@ import "github.com/golang/protobuf/proto" type codec struct{} -func (c *codec) Marshal(v interface{}) ([]byte, error) { +func (c *codec) Marshal(v any) ([]byte, error) { b, err := proto.Marshal(v.(proto.Message)) sentBytes.Add(float64(len(b))) return b, err } -func (c *codec) Unmarshal(data []byte, v interface{}) error { +func (c *codec) Unmarshal(data []byte, v any) error { receivedBytes.Add(float64(len(data))) return proto.Unmarshal(data, v.(proto.Message)) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go index 6947903a3865f..a20a9cd3f586b 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go @@ -18,16 +18,15 @@ import ( "crypto/tls" "math" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/v3/credentials" - "go.etcd.io/etcd/server/v3/etcdserver" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/client/v3/credentials" + "go.etcd.io/etcd/server/v3/etcdserver" ) const ( @@ -39,8 +38,7 @@ func Server(s *etcdserver.EtcdServer, tls 
*tls.Config, interceptor grpc.UnarySer var opts []grpc.ServerOption opts = append(opts, grpc.CustomCodec(&codec{})) if tls != nil { - bundle := credentials.NewBundle(credentials.Config{TLSConfig: tls}) - opts = append(opts, grpc.Creds(bundle.TransportCredentials())) + opts = append(opts, grpc.Creds(credentials.NewTransportCredential(tls))) } chainUnaryInterceptors := []grpc.UnaryServerInterceptor{ newLogUnaryInterceptor(s), @@ -59,11 +57,10 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config, interceptor grpc.UnarySer if s.Cfg.ExperimentalEnableDistributedTracing { chainUnaryInterceptors = append(chainUnaryInterceptors, otelgrpc.UnaryServerInterceptor(s.Cfg.ExperimentalTracerOptions...)) chainStreamInterceptors = append(chainStreamInterceptors, otelgrpc.StreamServerInterceptor(s.Cfg.ExperimentalTracerOptions...)) - } - opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(chainUnaryInterceptors...))) - opts = append(opts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(chainStreamInterceptors...))) + opts = append(opts, grpc.ChainUnaryInterceptor(chainUnaryInterceptors...)) + opts = append(opts, grpc.ChainStreamInterceptor(chainStreamInterceptors...)) opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes)) @@ -79,8 +76,8 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config, interceptor grpc.UnarySer hsrv := health.NewServer() healthNotifier := newHealthNotifier(hsrv, s) - pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s, healthNotifier)) healthpb.RegisterHealthServer(grpcServer, hsrv) + pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s, healthNotifier)) // set zero values for metrics registered for this grpc server grpc_prometheus.Register(grpcServer) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/header.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/header.go index 112cc922ea156..8fe4e5863b8b3 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/header.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/header.go @@ -17,19 +17,20 @@ package v3rpc import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/etcdserver/apply" ) type header struct { clusterID int64 memberID int64 - sg etcdserver.RaftStatusGetter + sg apply.RaftStatusGetter rev func() int64 } func newHeader(s *etcdserver.EtcdServer) header { return header{ clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), + memberID: int64(s.MemberID()), sg: s, rev: func() int64 { return s.KV().Rev() }, } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/health.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/health.go index f58396d1c3782..2861e11e6d3fd 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/health.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/health.go @@ -15,10 +15,12 @@ package v3rpc import ( - "go.etcd.io/etcd/server/v3/etcdserver" "go.uber.org/zap" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" + + "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/features" ) const ( @@ -34,7 +36,7 @@ func newHealthNotifier(hs *health.Server, s *etcdserver.EtcdServer) notifier { if hs == nil { panic("unexpected nil gRPC health server") } - hc := &healthNotifier{hs: hs, lg: s.Logger(), stopGRPCServiceOnDefrag: 
s.Cfg.ExperimentalStopGRPCServiceOnDefrag} + hc := &healthNotifier{hs: hs, lg: s.Logger(), stopGRPCServiceOnDefrag: s.FeatureEnabled(features.StopGRPCServiceOnDefrag)} // set grpc health server as serving status blindly since // the grpc server will serve iff s.ReadyNotify() is closed. hc.startServe() diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go index 5c80fcf041ebb..65e068ebbfc53 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go @@ -18,24 +18,24 @@ import ( "context" "sync" "time" + "unicode/utf8" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/client/pkg/v3/types" + "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/etcdserver/api" + "go.etcd.io/raft/v3" ) const ( - maxNoLeaderCnt = 3 - warnUnaryRequestLatency = 300 * time.Millisecond - snapshotMethod = "/etcdserverpb.Maintenance/Snapshot" + maxNoLeaderCnt = 3 + snapshotMethod = "/etcdserverpb.Maintenance/Snapshot" ) type streamsMap struct { @@ -44,13 +44,13 @@ type streamsMap struct { } func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { if !api.IsCapabilityEnabled(api.V3rpcCapability) { return nil, rpctypes.ErrGRPCNotCapable } - if s.IsMemberExist(s.ID()) && s.IsLearner() && !isRPCSupportedForLearner(req) { - return nil, rpctypes.ErrGPRCNotSupportedForLearner + if s.IsMemberExist(s.MemberID()) && s.IsLearner() && !isRPCSupportedForLearner(req) { + return nil, rpctypes.ErrGRPCNotSupportedForLearner } md, ok := metadata.FromIncomingContext(ctx) @@ -59,6 +59,9 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { if len(vs) > 0 { ver = vs[0] } + if !utf8.ValidString(ver) { + return nil, rpctypes.ErrGRPCInvalidClientAPIVersion + } clientRequests.WithLabelValues("unary", ver).Inc() if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { @@ -73,24 +76,24 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { } func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { startTime := time.Now() resp, err := handler(ctx, req) lg := s.Logger() - if lg != nil { // acquire stats if debug level is enabled or request is expensive - defer logUnaryRequestStats(ctx, lg, info, startTime, req, resp) + if lg != nil { // acquire stats if debug level is enabled or RequestInfo is expensive + defer logUnaryRequestStats(ctx, lg, s.Cfg.WarningUnaryRequestDuration, info, startTime, req, resp) } return 
resp, err } } -func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, info *grpc.UnaryServerInfo, startTime time.Time, req interface{}, resp interface{}) { +func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, warnLatency time.Duration, info *grpc.UnaryServerInfo, startTime time.Time, req any, resp any) { duration := time.Since(startTime) var enabledDebugLevel, expensiveRequest bool if lg.Core().Enabled(zap.DebugLevel) { enabledDebugLevel = true } - if duration > warnUnaryRequestLatency { + if duration > warnLatency { expensiveRequest = true } if !enabledDebugLevel && !expensiveRequest { @@ -210,13 +213,13 @@ func logExpensiveRequestStats(lg *zap.Logger, startTime time.Time, duration time func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor { smap := monitorLeader(s) - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if !api.IsCapabilityEnabled(api.V3rpcCapability) { return rpctypes.ErrGRPCNotCapable } - if s.IsMemberExist(s.ID()) && s.IsLearner() && info.FullMethod != snapshotMethod { // learner does not support stream RPC except Snapshot - return rpctypes.ErrGPRCNotSupportedForLearner + if s.IsMemberExist(s.MemberID()) && s.IsLearner() && info.FullMethod != snapshotMethod { // learner does not support stream RPC except Snapshot + return rpctypes.ErrGRPCNotSupportedForLearner } md, ok := metadata.FromIncomingContext(ss.Context()) @@ -225,6 +228,9 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor if len(vs) > 0 { ver = vs[0] } + if !utf8.ValidString(ver) { + return rpctypes.ErrGRPCInvalidClientAPIVersion + } clientRequests.WithLabelValues("stream", ver).Inc() if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go index d1a7ee633455f..2c1de2a90de84 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go @@ -115,6 +115,15 @@ func checkRangeRequest(r *pb.RangeRequest) error { if len(r.Key) == 0 { return rpctypes.ErrGRPCEmptyKey } + + if _, ok := pb.RangeRequest_SortOrder_name[int32(r.SortOrder)]; !ok { + return rpctypes.ErrGRPCInvalidSortOption + } + + if _, ok := pb.RangeRequest_SortTarget_name[int32(r.SortTarget)]; !ok { + return rpctypes.ErrGRPCInvalidSortOption + } + return nil } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/lease.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/lease.go index e123dd2a37ca5..4d215e6308fa4 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/lease.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/lease.go @@ -16,14 +16,15 @@ package v3rpc import ( "context" + "errors" "io" + "go.uber.org/zap" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/lease" - - "go.uber.org/zap" ) type LeaseServer struct { @@ -61,10 +62,10 @@ func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeReques func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { resp, err := ls.le.LeaseTimeToLive(ctx, rr) - if err != nil && 
err != lease.ErrLeaseNotFound { + if err != nil && !errors.Is(err, lease.ErrLeaseNotFound) { return nil, togRPCError(err) } - if err == lease.ErrLeaseNotFound { + if errors.Is(err, lease.ErrLeaseNotFound) { resp = &pb.LeaseTimeToLiveResponse{ Header: &pb.ResponseHeader{}, ID: rr.ID, @@ -77,10 +78,10 @@ func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLi func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { resp, err := ls.le.LeaseLeases(ctx, rr) - if err != nil && err != lease.ErrLeaseNotFound { + if err != nil && !errors.Is(err, lease.ErrLeaseNotFound) { return nil, togRPCError(err) } - if err == lease.ErrLeaseNotFound { + if errors.Is(err, lease.ErrLeaseNotFound) { resp = &pb.LeaseLeasesResponse{ Header: &pb.ResponseHeader{}, Leases: []*pb.LeaseStatus{}, @@ -100,7 +101,7 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err case <-stream.Context().Done(): // the only server-side cancellation is noleader for now. err = stream.Context().Err() - if err == context.Canceled { + if errors.Is(err, context.Canceled) { err = rpctypes.ErrGRPCNoLeader } } @@ -110,7 +111,7 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { for { req, err := stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } if err != nil { @@ -133,7 +134,7 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro ls.hdr.fill(resp.Header) ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID)) - if err == lease.ErrLeaseNotFound { + if errors.Is(err, lease.ErrLeaseNotFound) { err = nil ttl = 0 } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go index 7db17e1a00252..92b41c31703bb 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go @@ -17,20 +17,25 @@ package v3rpc import ( "context" "crypto/sha256" + errorspkg "errors" "io" "time" "github.com/dustin/go-humanize" + "go.uber.org/zap" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/server/v3/auth" + "go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/mvcc" - "go.etcd.io/etcd/server/v3/mvcc/backend" - - "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/etcdserver/apply" + "go.etcd.io/etcd/server/v3/etcdserver/errors" + serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/mvcc" + "go.etcd.io/etcd/server/v3/storage/schema" + "go.etcd.io/raft/v3" ) type KVGetter interface { @@ -56,36 +61,49 @@ type LeaderTransferrer interface { MoveLeader(ctx context.Context, lead, target uint64) error } -type AuthGetter interface { - AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) - AuthStore() auth.AuthStore -} - type ClusterStatusGetter interface { IsLearner() bool } +type ConfigGetter interface { + Config() config.ServerConfig +} + type maintenanceServer struct { lg *zap.Logger - rg etcdserver.RaftStatusGetter + rg apply.RaftStatusGetter hasher mvcc.HashStorage - kg KVGetter bg BackendGetter a Alarmer lt LeaderTransferrer hdr header cs 
ClusterStatusGetter d Downgrader + vs serverversion.Server + cg ConfigGetter healthNotifier notifier } func NewMaintenanceServer(s *etcdserver.EtcdServer, healthNotifier notifier) pb.MaintenanceServer { - srv := &maintenanceServer{lg: s.Cfg.Logger, rg: s, hasher: s.KV().HashStorage(), kg: s, bg: s, a: s, lt: s, hdr: newHeader(s), cs: s, d: s, healthNotifier: healthNotifier} + srv := &maintenanceServer{ + lg: s.Cfg.Logger, + rg: s, + hasher: s.KV().HashStorage(), + bg: s, + a: s, + lt: s, + hdr: newHeader(s), + cs: s, + d: s, + vs: etcdserver.NewServerVersionAdapter(s), + healthNotifier: healthNotifier, + cg: s, + } if srv.lg == nil { srv.lg = zap.NewNop() } - return &authMaintenanceServer{srv, s} + return &authMaintenanceServer{srv, &AuthAdmin{s}} } func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { @@ -95,7 +113,7 @@ func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRe err := ms.bg.Backend().Defrag() if err != nil { ms.lg.Warn("failed to defragment", zap.Error(err)) - return nil, err + return nil, togRPCError(err) } ms.lg.Info("finished defragment") return &pb.DefragmentResponse{}, nil @@ -105,6 +123,11 @@ func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRe const snapshotSendBufferSize = 32 * 1024 func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { + ver := schema.ReadStorageVersion(ms.bg.Backend().ReadTx()) + storageVersion := "" + if ver != nil { + storageVersion = ver.String() + } snap := ms.bg.Backend().Snapshot() pr, pw := io.Pipe() @@ -130,6 +153,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance ms.lg.Info("sending database snapshot to client", zap.Int64("total-bytes", total), zap.String("size", size), + zap.String("storage-version", storageVersion), ) for total-sent > 0 { // buffer just holds read bytes from stream @@ -140,7 +164,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance buf := make([]byte, snapshotSendBufferSize) n, err := io.ReadFull(pr, buf) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + if err != nil && !errorspkg.Is(err, io.EOF) && !errorspkg.Is(err, io.ErrUnexpectedEOF) { return togRPCError(err) } sent += int64(n) @@ -156,6 +180,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance resp := &pb.SnapshotResponse{ RemainingBytes: uint64(total - sent), Blob: buf[:n], + Version: storageVersion, } if err = srv.Send(resp); err != nil { return togRPCError(err) @@ -171,7 +196,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance zap.Int64("total-bytes", total), zap.Int("checksum-size", len(sha)), ) - hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha} + hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha, Version: storageVersion} if err := srv.Send(hresp); err != nil { return togRPCError(err) } @@ -179,7 +204,8 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance ms.lg.Info("successfully sent database snapshot to client", zap.Int64("total-bytes", total), zap.String("size", size), - zap.String("took", humanize.Time(start)), + zap.Duration("took", time.Since(start)), + zap.String("storage-version", storageVersion), ) return nil } @@ -200,7 +226,12 @@ func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (* return nil, togRPCError(err) } - resp := &pb.HashKVResponse{Header: 
&pb.ResponseHeader{Revision: rev}, Hash: h.Hash, CompactRevision: h.CompactRevision} + resp := &pb.HashKVResponse{ + Header: &pb.ResponseHeader{Revision: rev}, + Hash: h.Hash, + CompactRevision: h.CompactRevision, + HashRevision: h.Revision, + } ms.hdr.fill(resp.Header) return resp, nil } @@ -230,9 +261,13 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) ( DbSize: ms.bg.Backend().Size(), DbSizeInUse: ms.bg.Backend().SizeInUse(), IsLearner: ms.cs.IsLearner(), + DbSizeQuota: ms.cg.Config().QuotaBackendBytes, + } + if storageVersion := ms.vs.GetStorageVersion(); storageVersion != nil { + resp.StorageVersion = storageVersion.String() } if resp.Leader == raft.None { - resp.Errors = append(resp.Errors, etcdserver.ErrNoLeader.Error()) + resp.Errors = append(resp.Errors, errors.ErrNoLeader.Error()) } for _, a := range ms.a.Alarms() { resp.Errors = append(resp.Errors, a.String()) @@ -241,7 +276,7 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) ( } func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { - if ms.rg.ID() != ms.rg.Leader() { + if ms.rg.MemberID() != ms.rg.Leader() { return nil, rpctypes.ErrGRPCNotLeader } @@ -263,57 +298,60 @@ func (ms *maintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeReque type authMaintenanceServer struct { *maintenanceServer - ag AuthGetter -} - -func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error { - authInfo, err := ams.ag.AuthInfoFromCtx(ctx) - if err != nil { - return err - } - - return ams.ag.AuthStore().IsAdminPermitted(authInfo) + *AuthAdmin } func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - if err := ams.isAuthenticated(ctx); err != nil { - return nil, err + if err := ams.isPermitted(ctx); err != nil { + return nil, togRPCError(err) } return ams.maintenanceServer.Defragment(ctx, sr) } func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { - if err := ams.isAuthenticated(srv.Context()); err != nil { - return err + if err := ams.isPermitted(srv.Context()); err != nil { + return togRPCError(err) } return ams.maintenanceServer.Snapshot(sr, srv) } func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - if err := ams.isAuthenticated(ctx); err != nil { - return nil, err + if err := ams.isPermitted(ctx); err != nil { + return nil, togRPCError(err) } return ams.maintenanceServer.Hash(ctx, r) } func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) { - if err := ams.isAuthenticated(ctx); err != nil { - return nil, err + if err := ams.isPermitted(ctx); err != nil { + return nil, togRPCError(err) } return ams.maintenanceServer.HashKV(ctx, r) } func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { + if err := ams.isPermitted(ctx); err != nil { + return nil, togRPCError(err) + } + return ams.maintenanceServer.Status(ctx, ar) } func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { + if err := ams.isPermitted(ctx); err != nil { + return nil, togRPCError(err) + } + return ams.maintenanceServer.MoveLeader(ctx, tr) } func (ams *authMaintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { + 
if err := ams.isPermitted(ctx); err != nil { + return nil, togRPCError(err) + } + return ams.maintenanceServer.Downgrade(ctx, r) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/member.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/member.go index 54fcc24843d40..7fd68fe2d6f81 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/member.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/member.go @@ -106,7 +106,7 @@ func (cs *ClusterServer) MemberPromote(ctx context.Context, r *pb.MemberPromoteR } func (cs *ClusterServer) header() *pb.ResponseHeader { - return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.server.Term()} + return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.MemberID()), RaftTerm: cs.server.Term()} } func membersToProtoMembers(membs []*membership.Member) []*pb.Member { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go index 7f53bd966bd3d..13bb83ffcd65f 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go @@ -21,6 +21,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/storage" ) type quotaKVServer struct { @@ -29,14 +30,14 @@ type quotaKVServer struct { } type quotaAlarmer struct { - q etcdserver.Quota + q storage.Quota a Alarmer id types.ID } // check whether request satisfies the quota. If there is not enough space, // ignore request and raise the free space alarm. -func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { +func (qa *quotaAlarmer) check(ctx context.Context, r any) error { if qa.q.Available(r) { return nil } @@ -52,7 +53,7 @@ func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { return "aKVServer{ NewKVServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s, "kv"), s, s.ID()}, + quotaAlarmer{newBackendQuota(s, "kv"), s, s.MemberID()}, } } @@ -85,6 +86,10 @@ func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequ func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { return "aLeaseServer{ NewLeaseServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s, "lease"), s, s.ID()}, + quotaAlarmer{newBackendQuota(s, "lease"), s, s.MemberID()}, } } + +func newBackendQuota(s *etcdserver.EtcdServer, name string) storage.Quota { + return storage.NewBackendQuota(s.Logger(), s.Cfg.QuotaBackendBytes, s.Backend(), name) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go index b2389342686e9..2354b0cb2431e 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go @@ -16,54 +16,56 @@ package v3rpc import ( "context" + errorspkg "errors" "strings" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/errors" + "go.etcd.io/etcd/server/v3/etcdserver/version" 
"go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" + "go.etcd.io/etcd/server/v3/storage/mvcc" ) var toGRPCErrorMap = map[error]error{ - membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound, - membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound, - membership.ErrIDExists: rpctypes.ErrGRPCMemberExist, - membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist, - membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner, - membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners, - etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted, - etcdserver.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady, - - mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted, - mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev, - etcdserver.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge, - etcdserver.ErrNoSpace: rpctypes.ErrGRPCNoSpace, - etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests, - - etcdserver.ErrNoLeader: rpctypes.ErrGRPCNoLeader, - etcdserver.ErrNotLeader: rpctypes.ErrGRPCNotLeader, - etcdserver.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged, - etcdserver.ErrStopped: rpctypes.ErrGRPCStopped, - etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout, - etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail, - etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost, - etcdserver.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex, - etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, - etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, - etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt, - etcdserver.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee, - - etcdserver.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable, - etcdserver.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat, - etcdserver.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion, - etcdserver.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess, - etcdserver.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade, + membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound, + membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound, + membership.ErrIDExists: rpctypes.ErrGRPCMemberExist, + membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist, + membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner, + membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners, + errors.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted, + errors.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady, + + mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted, + mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev, + errors.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge, + errors.ErrNoSpace: rpctypes.ErrGRPCNoSpace, + errors.ErrTooManyRequests: rpctypes.ErrTooManyRequests, + + errors.ErrNoLeader: rpctypes.ErrGRPCNoLeader, + errors.ErrNotLeader: rpctypes.ErrGRPCNotLeader, + errors.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged, + errors.ErrStopped: rpctypes.ErrGRPCStopped, + errors.ErrTimeout: rpctypes.ErrGRPCTimeout, + errors.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail, + errors.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost, + errors.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex, + errors.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, + errors.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, + 
errors.ErrCorrupt: rpctypes.ErrGRPCCorrupt, + errors.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee, + + errors.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable, + errors.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat, + version.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion, + version.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess, + version.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade, lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound, lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist, @@ -94,7 +96,7 @@ var toGRPCErrorMap = map[error]error{ func togRPCError(err error) error { // let gRPC server convert to codes.Canceled, codes.DeadlineExceeded - if err == context.Canceled || err == context.DeadlineExceeded { + if errorspkg.Is(err, context.Canceled) || errorspkg.Is(err, context.DeadlineExceeded) { return err } grpcErr, ok := toGRPCErrorMap[err] @@ -137,7 +139,7 @@ func isClientCtxErr(ctxErr error, err error) bool { } // in v3.4, learner is allowed to serve serializable read and endpoint status -func isRPCSupportedForLearner(req interface{}) bool { +func isRPCSupportedForLearner(req any) bool { switch r := req.(type) { case *pb.StatusRequest: return true diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go index ddbcd231bf091..fab02f56746b2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go @@ -16,21 +16,23 @@ package v3rpc import ( "context" - "fmt" + "errors" "io" "math/rand" "sync" "time" + "go.uber.org/zap" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/client/pkg/v3/verify" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/mvcc" - - "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/etcdserver/apply" + "go.etcd.io/etcd/server/v3/storage/mvcc" ) const minWatchProgressInterval = 100 * time.Millisecond @@ -43,7 +45,7 @@ type watchServer struct { maxRequestBytes int - sg etcdserver.RaftStatusGetter + sg apply.RaftStatusGetter watchable mvcc.WatchableKV ag AuthGetter } @@ -54,7 +56,7 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { lg: s.Cfg.Logger, clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), + memberID: int64(s.MemberID()), maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes), @@ -126,7 +128,7 @@ type serverWatchStream struct { maxRequestBytes int - sg etcdserver.RaftStatusGetter + sg apply.RaftStatusGetter watchable mvcc.WatchableKV ag AuthGetter @@ -210,13 +212,13 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { // revisited. 
select { case err = <-errc: - if err == context.Canceled { + if errors.Is(err, context.Canceled) { err = rpctypes.ErrGRPCWatchCanceled } close(sws.ctrlStream) case <-stream.Context().Done(): err = stream.Context().Err() - if err == context.Canceled { + if errors.Is(err, context.Canceled) { err = rpctypes.ErrGRPCWatchCanceled } } @@ -240,7 +242,7 @@ func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) error func (sws *serverWatchStream) recvLoop() error { for { req, err := sws.gRPCStream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } if err != nil { @@ -271,15 +273,15 @@ func (sws *serverWatchStream) recvLoop() error { err := sws.isWatchPermitted(creq) if err != nil { var cancelReason string - switch err { - case auth.ErrInvalidAuthToken: + switch { + case errors.Is(err, auth.ErrInvalidAuthToken): cancelReason = rpctypes.ErrGRPCInvalidAuthToken.Error() - case auth.ErrAuthOldRevision: + case errors.Is(err, auth.ErrAuthOldRevision): cancelReason = rpctypes.ErrGRPCAuthOldRevision.Error() - case auth.ErrUserEmpty: + case errors.Is(err, auth.ErrUserEmpty): cancelReason = rpctypes.ErrGRPCUserEmpty.Error() default: - if err != auth.ErrPermissionDenied { + if !errors.Is(err, auth.ErrPermissionDenied) { sws.lg.Error("unexpected error code", zap.Error(err)) } cancelReason = rpctypes.ErrGRPCPermissionDenied.Error() @@ -345,11 +347,17 @@ func (sws *serverWatchStream) recvLoop() error { id := uv.CancelRequest.WatchId err := sws.watchStream.Cancel(mvcc.WatchID(id)) if err == nil { - sws.ctrlStream <- &pb.WatchResponse{ + wr := &pb.WatchResponse{ Header: sws.newResponseHeader(sws.watchStream.Rev()), WatchId: id, Canceled: true, } + select { + case sws.ctrlStream <- wr: + case <-sws.closec: + return nil + } + sws.mu.Lock() delete(sws.progress, mvcc.WatchID(id)) delete(sws.prevKV, mvcc.WatchID(id)) @@ -365,8 +373,9 @@ func (sws *serverWatchStream) recvLoop() error { } default: // we probably should not shutdown the entire stream when - // receive an valid command. + // receive an invalid command. // so just do nothing instead. + sws.lg.Sugar().Infof("invalid watch request type %T received in gRPC stream", uv) continue } } @@ -447,6 +456,7 @@ func (sws *serverWatchStream) sendLoop() { sws.mu.RUnlock() var serr error + // gofail: var beforeSendWatchResponse struct{} if !fragmented && !ok { serr = sws.gRPCStream.Send(wr) } else { @@ -488,9 +498,7 @@ func (sws *serverWatchStream) sendLoop() { // track id creation wid := mvcc.WatchID(c.WatchId) - if !(!(c.Canceled && c.Created) || wid == clientv3.InvalidWatchID) { - panic(fmt.Sprintf("unexpected watchId: %d, wanted: %d, since both 'Canceled' and 'Created' are true", wid, clientv3.InvalidWatchID)) - } + verify.Assert(!(c.Canceled && c.Created) || wid == clientv3.InvalidWatchID, "unexpected watchId: %d, wanted: %d, since both 'Canceled' and 'Created' are true", wid, clientv3.InvalidWatchID) if c.Canceled && wid != clientv3.InvalidWatchID { delete(ids, wid) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply.go deleted file mode 100644 index 2ef151259c1bd..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply.go +++ /dev/null @@ -1,1166 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
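// Editor's sketch (illustrative only): the watch-cancel fix above replaces a
// bare send on sws.ctrlStream with a select guarded by sws.closec, so recvLoop
// can no longer block forever once the stream is torn down. The pattern,
// reduced to its essentials with hypothetical names:

package main

import "fmt"

// send delivers msg unless the stream has been closed; it never blocks
// indefinitely on a consumer that has already exited.
func send(ctrl chan string, closec chan struct{}, msg string) bool {
	select {
	case ctrl <- msg: // consumer still running
		return true
	case <-closec: // stream torn down: drop the message instead of blocking
		return false
	}
}

func main() {
	ctrl := make(chan string, 1)
	closec := make(chan struct{})
	fmt.Println(send(ctrl, closec, "canceled")) // true: buffered slot available

	close(closec)
	// The buffer is full and nobody is reading, yet we still return promptly.
	fmt.Println(send(ctrl, closec, "canceled")) // false
}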
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "bytes" - "context" - "fmt" - "sort" - "strconv" - "time" - - "github.com/coreos/go-semver/semver" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/membershippb" - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc" - - "github.com/gogo/protobuf/proto" - "go.uber.org/zap" -) - -const ( - v3Version = "v3" -) - -type applyResult struct { - resp proto.Message - err error - // physc signals the physical effect of the request has completed in addition - // to being logically reflected by the node. Currently only used for - // Compaction requests. - physc <-chan struct{} - trace *traceutil.Trace -} - -// applierV3Internal is the interface for processing internal V3 raft request -type applierV3Internal interface { - ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) - ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) - DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) -} - -// applierV3 is the interface for processing V3 raft messages -type applierV3 interface { - Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult - - Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) - Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) - DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) - Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) - Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) - - LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) - - LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) - - Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) - - Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) - - AuthEnable() (*pb.AuthEnableResponse, error) - AuthDisable() (*pb.AuthDisableResponse, error) - AuthStatus() (*pb.AuthStatusResponse, error) - - UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - 
RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) -} - -type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error - -type applierV3backend struct { - s *EtcdServer - - checkPut checkReqFunc - checkRange checkReqFunc -} - -func (s *EtcdServer) newApplierV3Backend() applierV3 { - base := &applierV3backend{s: s} - base.checkPut = func(rv mvcc.ReadView, req *pb.RequestOp) error { - return base.checkRequestPut(rv, req) - } - base.checkRange = func(rv mvcc.ReadView, req *pb.RequestOp) error { - return base.checkRequestRange(rv, req) - } - return base -} - -func (s *EtcdServer) newApplierV3Internal() applierV3Internal { - base := &applierV3backend{s: s} - return base -} - -func (s *EtcdServer) newApplierV3() applierV3 { - return newAuthApplierV3( - s.AuthStore(), - newQuotaApplierV3(s, s.newApplierV3Backend()), - s.lessor, - ) -} - -func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult { - op := "unknown" - ar := &applyResult{} - defer func(start time.Time) { - success := ar.err == nil || ar.err == mvcc.ErrCompacted - applySec.WithLabelValues(v3Version, op, strconv.FormatBool(success)).Observe(time.Since(start).Seconds()) - warnOfExpensiveRequest(a.s.Logger(), a.s.Cfg.WarningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) - if !success { - warnOfFailedRequest(a.s.Logger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) - } - }(time.Now()) - - switch { - case r.ClusterVersionSet != nil: // Implemented in 3.5.x - op = "ClusterVersionSet" - a.s.applyV3Internal.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3) - return nil - case r.ClusterMemberAttrSet != nil: - op = "ClusterMemberAttrSet" // Implemented in 3.5.x - a.s.applyV3Internal.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3) - return nil - case r.DowngradeInfoSet != nil: - op = "DowngradeInfoSet" // Implemented in 3.5.x - a.s.applyV3Internal.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3) - return nil - } - - if !shouldApplyV3 { - return nil - } - - // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls - switch { - case r.Range != nil: - op = "Range" - ar.resp, ar.err = a.s.applyV3.Range(context.TODO(), nil, r.Range) - case r.Put != nil: - op = "Put" - ar.resp, ar.trace, ar.err = a.s.applyV3.Put(context.TODO(), nil, r.Put) - case r.DeleteRange != nil: - op = "DeleteRange" - ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) - case r.Txn != nil: - op = "Txn" - ar.resp, ar.trace, ar.err = a.s.applyV3.Txn(context.TODO(), r.Txn) - case r.Compaction != nil: - op = "Compaction" - ar.resp, ar.physc, ar.trace, ar.err = a.s.applyV3.Compaction(r.Compaction) - case r.LeaseGrant != nil: - op = "LeaseGrant" - ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant) - case r.LeaseRevoke != nil: - op = "LeaseRevoke" - ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke) - case r.LeaseCheckpoint != nil: - op = "LeaseCheckpoint" - ar.resp, ar.err = a.s.applyV3.LeaseCheckpoint(r.LeaseCheckpoint) - case r.Alarm != nil: - op = "Alarm" - ar.resp, 
ar.err = a.s.applyV3.Alarm(r.Alarm) - case r.Authenticate != nil: - op = "Authenticate" - ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate) - case r.AuthEnable != nil: - op = "AuthEnable" - ar.resp, ar.err = a.s.applyV3.AuthEnable() - case r.AuthDisable != nil: - op = "AuthDisable" - ar.resp, ar.err = a.s.applyV3.AuthDisable() - case r.AuthStatus != nil: - ar.resp, ar.err = a.s.applyV3.AuthStatus() - case r.AuthUserAdd != nil: - op = "AuthUserAdd" - ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd) - case r.AuthUserDelete != nil: - op = "AuthUserDelete" - ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete) - case r.AuthUserChangePassword != nil: - op = "AuthUserChangePassword" - ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword) - case r.AuthUserGrantRole != nil: - op = "AuthUserGrantRole" - ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole) - case r.AuthUserGet != nil: - op = "AuthUserGet" - ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet) - case r.AuthUserRevokeRole != nil: - op = "AuthUserRevokeRole" - ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole) - case r.AuthRoleAdd != nil: - op = "AuthRoleAdd" - ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd) - case r.AuthRoleGrantPermission != nil: - op = "AuthRoleGrantPermission" - ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) - case r.AuthRoleGet != nil: - op = "AuthRoleGet" - ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet) - case r.AuthRoleRevokePermission != nil: - op = "AuthRoleRevokePermission" - ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) - case r.AuthRoleDelete != nil: - op = "AuthRoleDelete" - ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete) - case r.AuthUserList != nil: - op = "AuthUserList" - ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList) - case r.AuthRoleList != nil: - op = "AuthRoleList" - ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList) - default: - a.s.lg.Panic("not implemented apply", zap.Stringer("raft-request", r)) - } - return ar -} - -func (a *applierV3backend) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { - resp = &pb.PutResponse{} - resp.Header = &pb.ResponseHeader{} - trace = traceutil.Get(ctx) - // create put tracing if the trace in context is empty - if trace.IsEmpty() { - trace = traceutil.New("put", - a.s.Logger(), - traceutil.Field{Key: "key", Value: string(p.Key)}, - traceutil.Field{Key: "req_size", Value: p.Size()}, - ) - } - val, leaseID := p.Value, lease.LeaseID(p.Lease) - if txn == nil { - if leaseID != lease.NoLease { - if l := a.s.lessor.Lookup(leaseID); l == nil { - return nil, nil, lease.ErrLeaseNotFound - } - } - txn = a.s.KV().Write(trace) - defer txn.End() - } - - var rr *mvcc.RangeResult - if p.IgnoreValue || p.IgnoreLease || p.PrevKv { - trace.StepWithFunction(func() { - rr, err = txn.Range(context.TODO(), p.Key, nil, mvcc.RangeOptions{}) - }, "get previous kv pair") - - if err != nil { - return nil, nil, err - } - } - if p.IgnoreValue || p.IgnoreLease { - if rr == nil || len(rr.KVs) == 0 { - // ignore_{lease,value} flag expects previous key-value pair - return nil, nil, ErrKeyNotFound - } - } - if p.IgnoreValue { - val = rr.KVs[0].Value - } - if p.IgnoreLease { - leaseID = lease.LeaseID(rr.KVs[0].Lease) - } - if p.PrevKv { - if rr != nil && len(rr.KVs) != 0 { - resp.PrevKv = &rr.KVs[0] - } - } - - resp.Header.Revision = txn.Put(p.Key, val, leaseID) - 
trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) - return resp, trace, nil -} - -func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - resp := &pb.DeleteRangeResponse{} - resp.Header = &pb.ResponseHeader{} - end := mkGteRange(dr.RangeEnd) - - if txn == nil { - txn = a.s.kv.Write(traceutil.TODO()) - defer txn.End() - } - - if dr.PrevKv { - rr, err := txn.Range(context.TODO(), dr.Key, end, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - if rr != nil { - resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs)) - for i := range rr.KVs { - resp.PrevKvs[i] = &rr.KVs[i] - } - } - } - - resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, end) - return resp, nil -} - -func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - trace := traceutil.Get(ctx) - - resp := &pb.RangeResponse{} - resp.Header = &pb.ResponseHeader{} - - if txn == nil { - txn = a.s.kv.Read(mvcc.ConcurrentReadTxMode, trace) - defer txn.End() - } - - limit := r.Limit - if r.SortOrder != pb.RangeRequest_NONE || - r.MinModRevision != 0 || r.MaxModRevision != 0 || - r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { - // fetch everything; sort and truncate afterwards - limit = 0 - } - if limit > 0 { - // fetch one extra for 'more' flag - limit = limit + 1 - } - - ro := mvcc.RangeOptions{ - Limit: limit, - Rev: r.Revision, - Count: r.CountOnly, - } - - rr, err := txn.Range(ctx, r.Key, mkGteRange(r.RangeEnd), ro) - if err != nil { - return nil, err - } - - if r.MaxModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } - pruneKVs(rr, f) - } - if r.MinModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision } - pruneKVs(rr, f) - } - if r.MaxCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } - pruneKVs(rr, f) - } - if r.MinCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } - pruneKVs(rr, f) - } - - sortOrder := r.SortOrder - if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE { - // Since current mvcc.Range implementation returns results - // sorted by keys in lexiographically ascending order, - // sort ASCEND by default only when target is not 'KEY' - sortOrder = pb.RangeRequest_ASCEND - } - if sortOrder != pb.RangeRequest_NONE { - var sorter sort.Interface - switch { - case r.SortTarget == pb.RangeRequest_KEY: - sorter = &kvSortByKey{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VERSION: - sorter = &kvSortByVersion{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_CREATE: - sorter = &kvSortByCreate{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_MOD: - sorter = &kvSortByMod{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VALUE: - sorter = &kvSortByValue{&kvSort{rr.KVs}} - } - switch { - case sortOrder == pb.RangeRequest_ASCEND: - sort.Sort(sorter) - case sortOrder == pb.RangeRequest_DESCEND: - sort.Sort(sort.Reverse(sorter)) - } - } - - if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { - rr.KVs = rr.KVs[:r.Limit] - resp.More = true - } - trace.Step("filter and sort the key-value pairs") - resp.Header.Revision = rr.Rev - resp.Count = int64(rr.Count) - resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs)) - for i := range rr.KVs { - if r.KeysOnly { - rr.KVs[i].Value = nil - } - resp.Kvs[i] = 
&rr.KVs[i] - } - trace.Step("assemble the response") - return resp, nil -} - -func (a *applierV3backend) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - lg := a.s.Logger() - trace := traceutil.Get(ctx) - if trace.IsEmpty() { - trace = traceutil.New("transaction", a.s.Logger()) - ctx = context.WithValue(ctx, traceutil.TraceKey, trace) - } - isWrite := !isTxnReadonly(rt) - - // When the transaction contains write operations, we use ReadTx instead of - // ConcurrentReadTx to avoid extra overhead of copying buffer. - var txn mvcc.TxnWrite - if isWrite && a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer { - txn = mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(mvcc.SharedBufReadTxMode, trace)) - } else { - txn = mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(mvcc.ConcurrentReadTxMode, trace)) - } - - var txnPath []bool - trace.StepWithFunction( - func() { - txnPath = compareToPath(txn, rt) - }, - "compare", - ) - - if isWrite { - trace.AddField(traceutil.Field{Key: "read_only", Value: false}) - if _, err := checkRequests(txn, rt, txnPath, a.checkPut); err != nil { - txn.End() - return nil, nil, err - } - } - if _, err := checkRequests(txn, rt, txnPath, a.checkRange); err != nil { - txn.End() - return nil, nil, err - } - trace.Step("check requests") - txnResp, _ := newTxnResp(rt, txnPath) - - // When executing mutable txn ops, etcd must hold the txn lock so - // readers do not see any intermediate results. Since writes are - // serialized on the raft loop, the revision in the read view will - // be the revision of the write txn. - if isWrite { - txn.End() - txn = a.s.KV().Write(trace) - } - _, err := a.applyTxn(ctx, txn, rt, txnPath, txnResp) - if err != nil { - if isWrite { - // end txn to release locks before panic - txn.End() - // When txn with write operations starts it has to be successful - // We don't have a way to recover state in case of write failure - lg.Panic("unexpected error during txn with writes", zap.Error(err)) - } else { - lg.Error("unexpected error during readonly txn", zap.Error(err)) - } - } - rev := txn.Rev() - if len(txn.Changes()) != 0 { - rev++ - } - txn.End() - - txnResp.Header.Revision = rev - trace.AddField( - traceutil.Field{Key: "number_of_response", Value: len(txnResp.Responses)}, - traceutil.Field{Key: "response_revision", Value: txnResp.Header.Revision}, - ) - return txnResp, trace, err -} - -// newTxnResp allocates a txn response for a txn request given a path. 
-func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) { - reqs := rt.Success - if !txnPath[0] { - reqs = rt.Failure - } - resps := make([]*pb.ResponseOp, len(reqs)) - txnResp = &pb.TxnResponse{ - Responses: resps, - Succeeded: txnPath[0], - Header: &pb.ResponseHeader{}, - } - for i, req := range reqs { - switch tv := req.Request.(type) { - case *pb.RequestOp_RequestRange: - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}} - case *pb.RequestOp_RequestPut: - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}} - case *pb.RequestOp_RequestDeleteRange: - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}} - case *pb.RequestOp_RequestTxn: - resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:]) - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}} - txnPath = txnPath[1+txns:] - txnCount += txns + 1 - default: - } - } - return txnResp, txnCount -} - -func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool { - txnPath := make([]bool, 1) - ops := rt.Success - if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] { - ops = rt.Failure - } - for _, op := range ops { - tv, ok := op.Request.(*pb.RequestOp_RequestTxn) - if !ok || tv.RequestTxn == nil { - continue - } - txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...) - } - return txnPath -} - -func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool { - for _, c := range cmps { - if !applyCompare(rv, c) { - return false - } - } - return true -} - -// applyCompare applies the compare request. -// If the comparison succeeds, it returns true. Otherwise, returns false. -func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { - // TODO: possible optimizations - // * chunk reads for large ranges to conserve memory - // * rewrite rules for common patterns: - // ex. 
"[a, b) createrev > 0" => "limit 1 /\ kvs > 0" - // * caching - rr, err := rv.Range(context.TODO(), c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{}) - if err != nil { - return false - } - if len(rr.KVs) == 0 { - if c.Target == pb.Compare_VALUE { - // Always fail if comparing a value on a key/keys that doesn't exist; - // nil == empty string in grpc; no way to represent missing value - return false - } - return compareKV(c, mvccpb.KeyValue{}) - } - for _, kv := range rr.KVs { - if !compareKV(c, kv) { - return false - } - } - return true -} - -func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool { - var result int - rev := int64(0) - switch c.Target { - case pb.Compare_VALUE: - v := []byte{} - if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil { - v = tv.Value - } - result = bytes.Compare(ckv.Value, v) - case pb.Compare_CREATE: - if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil { - rev = tv.CreateRevision - } - result = compareInt64(ckv.CreateRevision, rev) - case pb.Compare_MOD: - if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil { - rev = tv.ModRevision - } - result = compareInt64(ckv.ModRevision, rev) - case pb.Compare_VERSION: - if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil { - rev = tv.Version - } - result = compareInt64(ckv.Version, rev) - case pb.Compare_LEASE: - if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil { - rev = tv.Lease - } - result = compareInt64(ckv.Lease, rev) - } - switch c.Result { - case pb.Compare_EQUAL: - return result == 0 - case pb.Compare_NOT_EQUAL: - return result != 0 - case pb.Compare_GREATER: - return result > 0 - case pb.Compare_LESS: - return result < 0 - } - return true -} - -func (a *applierV3backend) applyTxn(ctx context.Context, txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int, err error) { - trace := traceutil.Get(ctx) - reqs := rt.Success - if !txnPath[0] { - reqs = rt.Failure - } - - for i, req := range reqs { - respi := tresp.Responses[i].Response - switch tv := req.Request.(type) { - case *pb.RequestOp_RequestRange: - trace.StartSubTrace( - traceutil.Field{Key: "req_type", Value: "range"}, - traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)}, - traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)}) - resp, err := a.Range(ctx, txn, tv.RequestRange) - if err != nil { - return 0, fmt.Errorf("applyTxn: failed Range: %w", err) - } - respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp - trace.StopSubTrace() - case *pb.RequestOp_RequestPut: - trace.StartSubTrace( - traceutil.Field{Key: "req_type", Value: "put"}, - traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)}, - traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()}) - resp, _, err := a.Put(ctx, txn, tv.RequestPut) - if err != nil { - return 0, fmt.Errorf("applyTxn: failed Put: %w", err) - } - respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp - trace.StopSubTrace() - case *pb.RequestOp_RequestDeleteRange: - resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) - if err != nil { - return 0, fmt.Errorf("applyTxn: failed DeleteRange: %w", err) - } - respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp - case *pb.RequestOp_RequestTxn: - resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn - applyTxns, err := a.applyTxn(ctx, txn, tv.RequestTxn, txnPath[1:], resp) - if err != nil { - // don't wrap the error. 
It's a recursive call and err should be already wrapped - return 0, err - } - txns += applyTxns + 1 - txnPath = txnPath[applyTxns+1:] - default: - // empty union - } - } - return txns, nil -} - -func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { - resp := &pb.CompactionResponse{} - resp.Header = &pb.ResponseHeader{} - trace := traceutil.New("compact", - a.s.Logger(), - traceutil.Field{Key: "revision", Value: compaction.Revision}, - ) - - ch, err := a.s.KV().Compact(trace, compaction.Revision) - if err != nil { - return nil, ch, nil, err - } - // get the current revision. which key to get is not important. - rr, _ := a.s.KV().Range(context.TODO(), []byte("compaction"), nil, mvcc.RangeOptions{}) - resp.Header.Revision = rr.Rev - return resp, ch, trace, err -} - -func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) - resp := &pb.LeaseGrantResponse{} - if err == nil { - resp.ID = int64(l.ID) - resp.TTL = l.TTL() - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - err := a.s.lessor.Revoke(lease.LeaseID(lc.ID)) - return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err -} - -func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) { - for _, c := range lc.Checkpoints { - err := a.s.lessor.Checkpoint(lease.LeaseID(c.ID), c.Remaining_TTL) - if err != nil { - return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, err - } - } - return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, nil -} - -func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp := &pb.AlarmResponse{} - oldCount := len(a.s.alarmStore.Get(ar.Alarm)) - - lg := a.s.Logger() - switch ar.Action { - case pb.AlarmRequest_GET: - resp.Alarms = a.s.alarmStore.Get(ar.Alarm) - case pb.AlarmRequest_ACTIVATE: - if ar.Alarm == pb.AlarmType_NONE { - break - } - m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) - if m == nil { - break - } - resp.Alarms = append(resp.Alarms, m) - activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1 - if !activated { - break - } - - lg.Warn("alarm raised", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String())) - switch m.Alarm { - case pb.AlarmType_CORRUPT: - a.s.applyV3 = newApplierV3Corrupt(a) - case pb.AlarmType_NOSPACE: - a.s.applyV3 = newApplierV3Capped(a) - default: - lg.Panic("unimplemented alarm activation", zap.String("alarm", fmt.Sprintf("%+v", m))) - } - case pb.AlarmRequest_DEACTIVATE: - m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) - if m == nil { - break - } - resp.Alarms = append(resp.Alarms, m) - deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0 - if !deactivated { - break - } - - switch m.Alarm { - case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT: - // TODO: check kv hash before deactivating CORRUPT? 
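// Editor's sketch (not part of the patch): the deleted Alarm handler above
// swaps the server's active applier for a guarding decorator when an alarm
// fires (newApplierV3Capped for NOSPACE, newApplierV3Corrupt for CORRUPT).
// A self-contained toy version of that decorator swap, with hypothetical
// names (applier, cappedApplier, errNoSpace):

package main

import (
	"errors"
	"fmt"
)

var errNoSpace = errors.New("no space")

type applier interface{ Put(key, val string) error }

type backendApplier struct{}

func (backendApplier) Put(key, val string) error { return nil }

// cappedApplier wraps another applier and rejects all writes, mirroring how
// the capped applier guards the store after a NOSPACE alarm is raised.
type cappedApplier struct{ applier }

func (cappedApplier) Put(key, val string) error { return errNoSpace }

func main() {
	var active applier = backendApplier{}
	fmt.Println(active.Put("k", "v")) // <nil>

	// Alarm raised: swap in the capped decorator; reads delegate, writes fail.
	active = cappedApplier{active}
	fmt.Println(active.Put("k", "v")) // no space
}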
- lg.Warn("alarm disarmed", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String())) - a.s.applyV3 = a.s.newApplierV3() - default: - lg.Warn("unimplemented alarm deactivation", zap.String("alarm", fmt.Sprintf("%+v", m))) - } - default: - return nil, nil - } - return resp, nil -} - -type applierV3Capped struct { - applierV3 - q backendQuota -} - -// newApplierV3Capped creates an applyV3 that will reject Puts and transactions -// with Puts so that the number of keys in the store is capped. -func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } - -func (a *applierV3Capped) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - return nil, nil, ErrNoSpace -} - -func (a *applierV3Capped) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - if a.q.Cost(r) > 0 { - return nil, nil, ErrNoSpace - } - return a.applierV3.Txn(ctx, r) -} - -func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, ErrNoSpace -} - -func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { - err := a.s.AuthStore().AuthEnable() - if err != nil { - return nil, err - } - return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil -} - -func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { - a.s.AuthStore().AuthDisable() - return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil -} - -func (a *applierV3backend) AuthStatus() (*pb.AuthStatusResponse, error) { - enabled := a.s.AuthStore().IsAuthEnabled() - authRevision := a.s.AuthStore().Revision() - return &pb.AuthStatusResponse{Header: newHeader(a.s), Enabled: enabled, AuthRevision: authRevision}, nil -} - -func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { - ctx := context.WithValue(context.WithValue(a.s.ctx, auth.AuthenticateParamIndex{}, a.s.consistIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken) - resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := a.s.AuthStore().UserAdd(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := a.s.AuthStore().UserDelete(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := a.s.AuthStore().UserChangePassword(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := a.s.AuthStore().UserGrantRole(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := a.s.AuthStore().UserGet(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := 
a.s.AuthStore().UserRevokeRole(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := a.s.AuthStore().RoleAdd(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := a.s.AuthStore().RoleGrantPermission(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := a.s.AuthStore().RoleGet(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := a.s.AuthStore().RoleRevokePermission(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := a.s.AuthStore().RoleDelete(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := a.s.AuthStore().UserList(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := a.s.AuthStore().RoleList(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) { - a.s.cluster.SetVersion(semver.Must(semver.NewVersion(r.Ver)), api.UpdateCapability, shouldApplyV3) -} - -func (a *applierV3backend) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) { - a.s.cluster.UpdateAttributes( - types.ID(r.Member_ID), - membership.Attributes{ - Name: r.MemberAttributes.Name, - ClientURLs: r.MemberAttributes.ClientUrls, - }, - shouldApplyV3, - ) -} - -func (a *applierV3backend) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) { - d := membership.DowngradeInfo{Enabled: false} - if r.Enabled { - d = membership.DowngradeInfo{Enabled: true, TargetVersion: r.Ver} - } - a.s.cluster.SetDowngradeInfo(&d, shouldApplyV3) -} - -type quotaApplierV3 struct { - applierV3 - q Quota -} - -func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { - return "aApplierV3{app, NewBackendQuota(s, "v3-applier")} -} - -func (a *quotaApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - ok := a.q.Available(p) - resp, trace, err := a.applierV3.Put(ctx, txn, p) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, trace, err -} - -func (a *quotaApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - ok := a.q.Available(rt) - resp, trace, err := a.applierV3.Txn(ctx, rt) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, trace, err -} - -func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - ok := a.q.Available(lc) - resp, err := a.applierV3.LeaseGrant(lc) - if err == 
nil && !ok { - err = ErrNoSpace - } - return resp, err -} - -type kvSort struct{ kvs []mvccpb.KeyValue } - -func (s *kvSort) Swap(i, j int) { - t := s.kvs[i] - s.kvs[i] = s.kvs[j] - s.kvs[j] = t -} -func (s *kvSort) Len() int { return len(s.kvs) } - -type kvSortByKey struct{ *kvSort } - -func (s *kvSortByKey) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0 -} - -type kvSortByVersion struct{ *kvSort } - -func (s *kvSortByVersion) Less(i, j int) bool { - return (s.kvs[i].Version - s.kvs[j].Version) < 0 -} - -type kvSortByCreate struct{ *kvSort } - -func (s *kvSortByCreate) Less(i, j int) bool { - return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0 -} - -type kvSortByMod struct{ *kvSort } - -func (s *kvSortByMod) Less(i, j int) bool { - return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0 -} - -type kvSortByValue struct{ *kvSort } - -func (s *kvSortByValue) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 -} - -func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) { - txnCount := 0 - reqs := rt.Success - if !txnPath[0] { - reqs = rt.Failure - } - for _, req := range reqs { - if tv, ok := req.Request.(*pb.RequestOp_RequestTxn); ok && tv.RequestTxn != nil { - txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f) - if err != nil { - return 0, err - } - txnCount += txns + 1 - txnPath = txnPath[txns+1:] - continue - } - if err := f(rv, req); err != nil { - return 0, err - } - } - return txnCount, nil -} - -func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqOp *pb.RequestOp) error { - tv, ok := reqOp.Request.(*pb.RequestOp_RequestPut) - if !ok || tv.RequestPut == nil { - return nil - } - req := tv.RequestPut - if req.IgnoreValue || req.IgnoreLease { - // expects previous key-value, error if not exist - rr, err := rv.Range(context.TODO(), req.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return err - } - if rr == nil || len(rr.KVs) == 0 { - return ErrKeyNotFound - } - } - if lease.LeaseID(req.Lease) != lease.NoLease { - if l := a.s.lessor.Lookup(lease.LeaseID(req.Lease)); l == nil { - return lease.ErrLeaseNotFound - } - } - return nil -} - -func (a *applierV3backend) checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error { - tv, ok := reqOp.Request.(*pb.RequestOp_RequestRange) - if !ok || tv.RequestRange == nil { - return nil - } - req := tv.RequestRange - switch { - case req.Revision == 0: - return nil - case req.Revision > rv.Rev(): - return mvcc.ErrFutureRev - case req.Revision < rv.FirstRev(): - return mvcc.ErrCompacted - } - return nil -} - -func compareInt64(a, b int64) int { - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 - } -} - -// mkGteRange determines if the range end is a >= range. This works around grpc -// sending empty byte strings as nil; >= is encoded in the range end as '\0'. -// If it is a GTE range, then []byte{} is returned to indicate the empty byte -// string (vs nil being no byte string). 
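// Editor's sketch: the doc comment above describes how a single NUL byte in a
// range end encodes a ">= key" range, because gRPC cannot distinguish nil from
// an empty byte string. A standalone rendering of the helper as it appears in
// the deleted file, with the three cases spelled out:

package main

import "fmt"

// mkGteRange mirrors the vendored helper: a one-byte NUL range end becomes
// the empty (non-nil) byte string, meaning "from key to the end of keyspace".
func mkGteRange(rangeEnd []byte) []byte {
	if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
		return []byte{}
	}
	return rangeEnd
}

func main() {
	fmt.Println(mkGteRange(nil) == nil)        // true: no range end, single-key lookup
	fmt.Println(mkGteRange([]byte{0}) == nil)  // false: empty-but-non-nil means ">= key"
	fmt.Printf("%q\n", mkGteRange([]byte("b"))) // "b": ordinary half-open range [key, "b")
}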
-func mkGteRange(rangeEnd []byte) []byte { - if len(rangeEnd) == 1 && rangeEnd[0] == 0 { - return []byte{} - } - return rangeEnd -} - -func noSideEffect(r *pb.InternalRaftRequest) bool { - return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil -} - -func removeNeedlessRangeReqs(txn *pb.TxnRequest) { - f := func(ops []*pb.RequestOp) []*pb.RequestOp { - j := 0 - for i := 0; i < len(ops); i++ { - if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok { - continue - } - ops[j] = ops[i] - j++ - } - - return ops[:j] - } - - txn.Success = f(txn.Success) - txn.Failure = f(txn.Failure) -} - -func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { - j := 0 - for i := range rr.KVs { - rr.KVs[j] = rr.KVs[i] - if !isPrunable(&rr.KVs[i]) { - j++ - } - } - rr.KVs = rr.KVs[:j] -} - -func newHeader(s *EtcdServer) *pb.ResponseHeader { - return &pb.ResponseHeader{ - ClusterId: uint64(s.Cluster().ID()), - MemberId: uint64(s.ID()), - Revision: s.KV().Rev(), - RaftTerm: s.Term(), - } -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply.go new file mode 100644 index 0000000000000..7629787dbbba6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply.go @@ -0,0 +1,491 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apply + +import ( + "context" + + "github.com/coreos/go-semver/semver" + "github.com/gogo/protobuf/proto" + "go.uber.org/zap" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/membershippb" + "go.etcd.io/etcd/client/pkg/v3/types" + "go.etcd.io/etcd/pkg/v3/traceutil" + "go.etcd.io/etcd/server/v3/auth" + "go.etcd.io/etcd/server/v3/etcdserver/api" + "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" + "go.etcd.io/etcd/server/v3/etcdserver/cindex" + "go.etcd.io/etcd/server/v3/etcdserver/errors" + mvcctxn "go.etcd.io/etcd/server/v3/etcdserver/txn" + "go.etcd.io/etcd/server/v3/etcdserver/version" + "go.etcd.io/etcd/server/v3/lease" + serverstorage "go.etcd.io/etcd/server/v3/storage" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/mvcc" +) + +const ( + v3Version = "v3" +) + +// RaftStatusGetter represents etcd server and Raft progress. +type RaftStatusGetter interface { + MemberID() types.ID + Leader() types.ID + CommittedIndex() uint64 + AppliedIndex() uint64 + Term() uint64 +} + +type Result struct { + Resp proto.Message + Err error + // Physc signals the physical effect of the request has completed in addition + // to being logically reflected by the node. Currently, only used for + // Compaction requests. 
+ Physc <-chan struct{} + Trace *traceutil.Trace +} + +type applyFunc func(r *pb.InternalRaftRequest) *Result + +// applierV3 is the interface for processing V3 raft messages +type applierV3 interface { + // Apply executes the generic portion of application logic for the current applier, but + // delegates the actual execution to the applyFunc method. + Apply(r *pb.InternalRaftRequest, applyFunc applyFunc) *Result + + Put(p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) + Range(r *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error) + DeleteRange(dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error) + Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) + Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) + + LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) + LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) + + LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) + + Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) + + Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) + + AuthEnable() (*pb.AuthEnableResponse, error) + AuthDisable() (*pb.AuthDisableResponse, error) + AuthStatus() (*pb.AuthStatusResponse, error) + + UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) + UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) + UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) + UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) + UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) + UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) + RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) + RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) + RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) + RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) + RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) + UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) + RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) +} + +type SnapshotServer interface { + ForceSnapshot() +} + +type applierV3backend struct { + lg *zap.Logger + kv mvcc.KV + alarmStore *v3alarm.AlarmStore + authStore auth.AuthStore + lessor lease.Lessor + cluster *membership.RaftCluster + raftStatus RaftStatusGetter + snapshotServer SnapshotServer + consistentIndex cindex.ConsistentIndexer + + txnModeWriteWithSharedBuffer bool +} + +func newApplierV3Backend( + lg *zap.Logger, + kv mvcc.KV, + alarmStore *v3alarm.AlarmStore, + authStore auth.AuthStore, + lessor lease.Lessor, + cluster *membership.RaftCluster, + raftStatus RaftStatusGetter, + snapshotServer SnapshotServer, + consistentIndex cindex.ConsistentIndexer, + txnModeWriteWithSharedBuffer bool) applierV3 { + return &applierV3backend{ + lg: lg, + kv: kv, + alarmStore: alarmStore, + authStore: authStore, + lessor: lessor, + cluster: cluster, + raftStatus: raftStatus, + snapshotServer: snapshotServer, + consistentIndex: consistentIndex, + txnModeWriteWithSharedBuffer: txnModeWriteWithSharedBuffer} +} + +func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, applyFunc applyFunc) *Result { + 
return applyFunc(r) +} + +func (a *applierV3backend) Put(p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { + return mvcctxn.Put(context.TODO(), a.lg, a.lessor, a.kv, p) +} + +func (a *applierV3backend) DeleteRange(dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error) { + return mvcctxn.DeleteRange(context.TODO(), a.lg, a.kv, dr) +} + +func (a *applierV3backend) Range(r *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error) { + return mvcctxn.Range(context.TODO(), a.lg, a.kv, r) +} + +func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + return mvcctxn.Txn(context.TODO(), a.lg, rt, a.txnModeWriteWithSharedBuffer, a.kv, a.lessor) +} + +func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { + resp := &pb.CompactionResponse{} + resp.Header = &pb.ResponseHeader{} + trace := traceutil.New("compact", + a.lg, + traceutil.Field{Key: "revision", Value: compaction.Revision}, + ) + + ch, err := a.kv.Compact(trace, compaction.Revision) + if err != nil { + return nil, ch, nil, err + } + // get the current revision. which key to get is not important. + rr, _ := a.kv.Range(context.TODO(), []byte("compaction"), nil, mvcc.RangeOptions{}) + resp.Header.Revision = rr.Rev + return resp, ch, trace, err +} + +func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + l, err := a.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) + resp := &pb.LeaseGrantResponse{} + if err == nil { + resp.ID = int64(l.ID) + resp.TTL = l.TTL() + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + err := a.lessor.Revoke(lease.LeaseID(lc.ID)) + return &pb.LeaseRevokeResponse{Header: a.newHeader()}, err +} + +func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) { + for _, c := range lc.Checkpoints { + err := a.lessor.Checkpoint(lease.LeaseID(c.ID), c.Remaining_TTL) + if err != nil { + return &pb.LeaseCheckpointResponse{Header: a.newHeader()}, err + } + } + return &pb.LeaseCheckpointResponse{Header: a.newHeader()}, nil +} + +func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { + resp := &pb.AlarmResponse{} + + switch ar.Action { + case pb.AlarmRequest_GET: + resp.Alarms = a.alarmStore.Get(ar.Alarm) + case pb.AlarmRequest_ACTIVATE: + if ar.Alarm == pb.AlarmType_NONE { + break + } + m := a.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) + if m == nil { + break + } + resp.Alarms = append(resp.Alarms, m) + alarms.WithLabelValues(types.ID(ar.MemberID).String(), m.Alarm.String()).Inc() + case pb.AlarmRequest_DEACTIVATE: + m := a.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) + if m == nil { + break + } + resp.Alarms = append(resp.Alarms, m) + alarms.WithLabelValues(types.ID(ar.MemberID).String(), m.Alarm.String()).Dec() + default: + return nil, nil + } + return resp, nil +} + +type applierV3Capped struct { + applierV3 + q serverstorage.BackendQuota +} + +// newApplierV3Capped creates an applyV3 that will reject Puts and transactions +// with Puts so that the number of keys in the store is capped. 
+func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } + +func (a *applierV3Capped) Put(_ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { + return nil, nil, errors.ErrNoSpace +} + +func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + if a.q.Cost(r) > 0 { + return nil, nil, errors.ErrNoSpace + } + return a.applierV3.Txn(r) +} + +func (a *applierV3Capped) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + return nil, errors.ErrNoSpace +} + +func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { + err := a.authStore.AuthEnable() + if err != nil { + return nil, err + } + return &pb.AuthEnableResponse{Header: a.newHeader()}, nil +} + +func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { + a.authStore.AuthDisable() + return &pb.AuthDisableResponse{Header: a.newHeader()}, nil +} + +func (a *applierV3backend) AuthStatus() (*pb.AuthStatusResponse, error) { + enabled := a.authStore.IsAuthEnabled() + authRevision := a.authStore.Revision() + return &pb.AuthStatusResponse{Header: a.newHeader(), Enabled: enabled, AuthRevision: authRevision}, nil +} + +func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { + ctx := context.WithValue(context.WithValue(context.Background(), auth.AuthenticateParamIndex{}, a.consistentIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken) + resp, err := a.authStore.Authenticate(ctx, r.Name, r.Password) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + resp, err := a.authStore.UserAdd(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + resp, err := a.authStore.UserDelete(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + resp, err := a.authStore.UserChangePassword(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + resp, err := a.authStore.UserGrantRole(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + resp, err := a.authStore.UserGet(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + resp, err := a.authStore.UserRevokeRole(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + resp, err := a.authStore.RoleAdd(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + resp, err := a.authStore.RoleGrantPermission(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) RoleGet(r 
*pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + resp, err := a.authStore.RoleGet(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + resp, err := a.authStore.RoleRevokePermission(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + resp, err := a.authStore.RoleDelete(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + resp, err := a.authStore.UserList(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + resp, err := a.authStore.RoleList(r) + if resp != nil { + resp.Header = a.newHeader() + } + return resp, err +} + +type applierMembership struct { + lg *zap.Logger + cluster *membership.RaftCluster + snapshotServer SnapshotServer +} + +func NewApplierMembership(lg *zap.Logger, cluster *membership.RaftCluster, snapshotServer SnapshotServer) *applierMembership { + return &applierMembership{ + lg: lg, + cluster: cluster, + snapshotServer: snapshotServer, + } +} + +func (a *applierMembership) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) { + prevVersion := a.cluster.Version() + newVersion := semver.Must(semver.NewVersion(r.Ver)) + a.cluster.SetVersion(newVersion, api.UpdateCapability, shouldApplyV3) + // Force snapshot after cluster version downgrade. 
+ if prevVersion != nil && newVersion.LessThan(*prevVersion) { + lg := a.lg + if lg != nil { + lg.Info("Cluster version downgrade detected, forcing snapshot", + zap.String("prev-cluster-version", prevVersion.String()), + zap.String("new-cluster-version", newVersion.String()), + ) + } + a.snapshotServer.ForceSnapshot() + } +} + +func (a *applierMembership) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) { + a.cluster.UpdateAttributes( + types.ID(r.Member_ID), + membership.Attributes{ + Name: r.MemberAttributes.Name, + ClientURLs: r.MemberAttributes.ClientUrls, + }, + shouldApplyV3, + ) +} + +func (a *applierMembership) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) { + d := version.DowngradeInfo{Enabled: false} + if r.Enabled { + d = version.DowngradeInfo{Enabled: true, TargetVersion: r.Ver} + } + a.cluster.SetDowngradeInfo(&d, shouldApplyV3) +} + +type quotaApplierV3 struct { + applierV3 + q serverstorage.Quota +} + +func newQuotaApplierV3(lg *zap.Logger, quotaBackendBytesCfg int64, be backend.Backend, app applierV3) applierV3 { + return "aApplierV3{app, serverstorage.NewBackendQuota(lg, quotaBackendBytesCfg, be, "v3-applier")} +} + +func (a *quotaApplierV3) Put(p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { + ok := a.q.Available(p) + resp, trace, err := a.applierV3.Put(p) + if err == nil && !ok { + err = errors.ErrNoSpace + } + return resp, trace, err +} + +func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + ok := a.q.Available(rt) + resp, trace, err := a.applierV3.Txn(rt) + if err == nil && !ok { + err = errors.ErrNoSpace + } + return resp, trace, err +} + +func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + ok := a.q.Available(lc) + resp, err := a.applierV3.LeaseGrant(lc) + if err == nil && !ok { + err = errors.ErrNoSpace + } + return resp, err +} + +func (a *applierV3backend) newHeader() *pb.ResponseHeader { + return &pb.ResponseHeader{ + ClusterId: uint64(a.cluster.ID()), + MemberId: uint64(a.raftStatus.MemberID()), + Revision: a.kv.Rev(), + RaftTerm: a.raftStatus.Term(), + } +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_auth.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply_auth.go similarity index 66% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_auth.go rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply_auth.go index beafa967ba957..3922deebd0127 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_auth.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply_auth.go @@ -12,18 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
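// Editor's sketch (not part of the patch): ClusterVersionSet above forces a
// snapshot whenever the new cluster version is lower than the previous one.
// The comparison relies on go-semver's Version.LessThan, exactly as the
// vendored code uses it; a standalone illustration:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	prev := semver.Must(semver.NewVersion("3.6.0"))
	next := semver.Must(semver.NewVersion("3.5.0"))

	// Downgrade detected: this is the condition under which the applier
	// logs "forcing snapshot" and calls snapshotServer.ForceSnapshot().
	fmt.Println(next.LessThan(*prev)) // true
}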
-package etcdserver +package apply import ( - "context" "sync" pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc" ) type authApplierV3 struct { @@ -42,7 +40,7 @@ func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *a return &authApplierV3{applierV3: base, as: as, lessor: lessor} } -func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult { +func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, applyFunc applyFunc) *Result { aa.mu.Lock() defer aa.mu.Unlock() if r.Header != nil { @@ -55,16 +53,16 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membersh if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil { aa.authInfo.Username = "" aa.authInfo.Revision = 0 - return &applyResult{err: err} + return &Result{Err: err} } } - ret := aa.applierV3.Apply(r, shouldApplyV3) + ret := aa.applierV3.Apply(r, applyFunc) aa.authInfo.Username = "" aa.authInfo.Revision = 0 return ret } -func (aa *authApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { +func (aa *authApplierV3) Put(r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { return nil, nil, err } @@ -83,90 +81,35 @@ func (aa *authApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, r *pb.PutRe return nil, nil, err } } - return aa.applierV3.Put(ctx, txn, r) + return aa.applierV3.Put(r) } -func (aa *authApplierV3) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (aa *authApplierV3) Range(r *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error) { if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { - return nil, err + return nil, nil, err } - return aa.applierV3.Range(ctx, txn, r) + return aa.applierV3.Range(r) } -func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { +func (aa *authApplierV3) DeleteRange(r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error) { if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { - return nil, err + return nil, nil, err } if r.PrevKv { err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd) if err != nil { - return nil, err - } - } - - return aa.applierV3.DeleteRange(txn, r) -} - -func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { - for _, requ := range reqs { - switch tv := requ.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange == nil { - continue - } - - if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil { - return err - } - - case *pb.RequestOp_RequestPut: - if tv.RequestPut == nil { - continue - } - - if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil { - return err - } - - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange == nil { - continue - } - - if tv.RequestDeleteRange.PrevKv { - err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) - if err != nil { - return err - } - } - - err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, 
tv.RequestDeleteRange.RangeEnd) - if err != nil { - return err - } + return nil, nil, err } } - return nil -} - -func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { - for _, c := range rt.Compare { - if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil { - return err - } - } - if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { - return err - } - return checkTxnReqsPermission(as, ai, rt.Failure) + return aa.applierV3.DeleteRange(r) } -func (aa *authApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil { +func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + if err := txn.CheckTxnAuth(aa.as, &aa.authInfo, rt); err != nil { return nil, nil, err } - return aa.applierV3.Txn(ctx, rt) + return aa.applierV3.Txn(rt) } func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/corrupt.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/corrupt.go new file mode 100644 index 0000000000000..c198119510aa8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/corrupt.go @@ -0,0 +1,55 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package apply + +import ( + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/pkg/v3/traceutil" + "go.etcd.io/etcd/server/v3/etcdserver/errors" +) + +type applierV3Corrupt struct { + applierV3 +} + +func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} } + +func (a *applierV3Corrupt) Put(_ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { + return nil, nil, errors.ErrCorrupt +} + +func (a *applierV3Corrupt) Range(_ *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error) { + return nil, nil, errors.ErrCorrupt +} + +func (a *applierV3Corrupt) DeleteRange(_ *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error) { + return nil, nil, errors.ErrCorrupt +} + +func (a *applierV3Corrupt) Txn(_ *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + return nil, nil, errors.ErrCorrupt +} + +func (a *applierV3Corrupt) Compaction(_ *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { + return nil, nil, nil, errors.ErrCorrupt +} + +func (a *applierV3Corrupt) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + return nil, errors.ErrCorrupt +} + +func (a *applierV3Corrupt) LeaseRevoke(_ *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + return nil, errors.ErrCorrupt +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/stats.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/metrics.go similarity index 53% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/stats.go rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/metrics.go index cbf60215a2413..34578578b4a06 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/stats.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/metrics.go @@ -1,4 +1,4 @@ -// Copyright 2015 The etcd Authors +// Copyright 2022 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,15 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package v2stats defines a standard interface for etcd cluster statistics. -package v2stats +package apply -type Stats interface { - // SelfStats returns the struct representing statistics of this server - SelfStats() []byte - // LeaderStats returns the statistics of all followers in the cluster - // if this server is leader. Otherwise, nil is returned. - LeaderStats() []byte - // StoreStats returns statistics of the store backing this EtcdServer - StoreStats() []byte +import "github.com/prometheus/client_golang/prometheus" + +var ( + alarms = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "server", + Name: "alarms", + Help: "Alarms for every member in cluster. 1 for 'server_id' label with current ID. 
2 for 'alarm_type' label with type of this alarm", + }, + []string{"server_id", "alarm_type"}) +) + +func init() { + prometheus.MustRegister(alarms) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/uber_applier.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/uber_applier.go new file mode 100644 index 0000000000000..9862d7d3d3903 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/uber_applier.go @@ -0,0 +1,226 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apply + +import ( + "errors" + "time" + + "go.uber.org/zap" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/auth" + "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" + "go.etcd.io/etcd/server/v3/etcdserver/cindex" + "go.etcd.io/etcd/server/v3/etcdserver/txn" + "go.etcd.io/etcd/server/v3/lease" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/mvcc" +) + +type UberApplier interface { + Apply(r *pb.InternalRaftRequest) *Result +} + +type uberApplier struct { + lg *zap.Logger + + alarmStore *v3alarm.AlarmStore + warningApplyDuration time.Duration + + // This is the applier that is taking in consideration current alarms + applyV3 applierV3 + + // This is the applier used for wrapping when alarms change + applyV3base applierV3 +} + +func NewUberApplier( + lg *zap.Logger, + be backend.Backend, + kv mvcc.KV, + alarmStore *v3alarm.AlarmStore, + authStore auth.AuthStore, + lessor lease.Lessor, + cluster *membership.RaftCluster, + raftStatus RaftStatusGetter, + snapshotServer SnapshotServer, + consistentIndex cindex.ConsistentIndexer, + warningApplyDuration time.Duration, + txnModeWriteWithSharedBuffer bool, + quotaBackendBytesCfg int64) UberApplier { + applyV3base := newApplierV3(lg, be, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer, quotaBackendBytesCfg) + + ua := &uberApplier{ + lg: lg, + alarmStore: alarmStore, + warningApplyDuration: warningApplyDuration, + applyV3: applyV3base, + applyV3base: applyV3base, + } + ua.restoreAlarms() + return ua +} + +func newApplierV3( + lg *zap.Logger, + be backend.Backend, + kv mvcc.KV, + alarmStore *v3alarm.AlarmStore, + authStore auth.AuthStore, + lessor lease.Lessor, + cluster *membership.RaftCluster, + raftStatus RaftStatusGetter, + snapshotServer SnapshotServer, + consistentIndex cindex.ConsistentIndexer, + txnModeWriteWithSharedBuffer bool, + quotaBackendBytesCfg int64) applierV3 { + applierBackend := newApplierV3Backend(lg, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer) + return newAuthApplierV3( + authStore, + newQuotaApplierV3(lg, quotaBackendBytesCfg, be, applierBackend), + lessor, + ) +} + +func (a *uberApplier) restoreAlarms() { + noSpaceAlarms := len(a.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 + corruptAlarms := 
len(a.alarmStore.Get(pb.AlarmType_CORRUPT)) > 0 + a.applyV3 = a.applyV3base + if noSpaceAlarms { + a.applyV3 = newApplierV3Capped(a.applyV3) + } + if corruptAlarms { + a.applyV3 = newApplierV3Corrupt(a.applyV3) + } +} + +func (a *uberApplier) Apply(r *pb.InternalRaftRequest) *Result { + // We first execute chain of Apply() calls down the hierarchy: + // (i.e. CorruptApplier -> CappedApplier -> Auth -> Quota -> Backend), + // then dispatch() unpacks the request to a specific method (like Put), + // that gets executed down the hierarchy again: + // i.e. CorruptApplier.Put(CappedApplier.Put(...(BackendApplier.Put(...)))). + return a.applyV3.Apply(r, a.dispatch) +} + +// dispatch translates the request (r) into appropriate call (like Put) on +// the underlying applyV3 object. +func (a *uberApplier) dispatch(r *pb.InternalRaftRequest) *Result { + op := "unknown" + ar := &Result{} + defer func(start time.Time) { + success := ar.Err == nil || errors.Is(ar.Err, mvcc.ErrCompacted) + txn.ApplySecObserve(v3Version, op, success, time.Since(start)) + txn.WarnOfExpensiveRequest(a.lg, a.warningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err) + if !success { + txn.WarnOfFailedRequest(a.lg, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err) + } + }(time.Now()) + + switch { + case r.Range != nil: + op = "Range" + ar.Resp, ar.Trace, ar.Err = a.applyV3.Range(r.Range) + case r.Put != nil: + op = "Put" + ar.Resp, ar.Trace, ar.Err = a.applyV3.Put(r.Put) + case r.DeleteRange != nil: + op = "DeleteRange" + ar.Resp, ar.Trace, ar.Err = a.applyV3.DeleteRange(r.DeleteRange) + case r.Txn != nil: + op = "Txn" + ar.Resp, ar.Trace, ar.Err = a.applyV3.Txn(r.Txn) + case r.Compaction != nil: + op = "Compaction" + ar.Resp, ar.Physc, ar.Trace, ar.Err = a.applyV3.Compaction(r.Compaction) + case r.LeaseGrant != nil: + op = "LeaseGrant" + ar.Resp, ar.Err = a.applyV3.LeaseGrant(r.LeaseGrant) + case r.LeaseRevoke != nil: + op = "LeaseRevoke" + ar.Resp, ar.Err = a.applyV3.LeaseRevoke(r.LeaseRevoke) + case r.LeaseCheckpoint != nil: + op = "LeaseCheckpoint" + ar.Resp, ar.Err = a.applyV3.LeaseCheckpoint(r.LeaseCheckpoint) + case r.Alarm != nil: + op = "Alarm" + ar.Resp, ar.Err = a.Alarm(r.Alarm) + case r.Authenticate != nil: + op = "Authenticate" + ar.Resp, ar.Err = a.applyV3.Authenticate(r.Authenticate) + case r.AuthEnable != nil: + op = "AuthEnable" + ar.Resp, ar.Err = a.applyV3.AuthEnable() + case r.AuthDisable != nil: + op = "AuthDisable" + ar.Resp, ar.Err = a.applyV3.AuthDisable() + case r.AuthStatus != nil: + ar.Resp, ar.Err = a.applyV3.AuthStatus() + case r.AuthUserAdd != nil: + op = "AuthUserAdd" + ar.Resp, ar.Err = a.applyV3.UserAdd(r.AuthUserAdd) + case r.AuthUserDelete != nil: + op = "AuthUserDelete" + ar.Resp, ar.Err = a.applyV3.UserDelete(r.AuthUserDelete) + case r.AuthUserChangePassword != nil: + op = "AuthUserChangePassword" + ar.Resp, ar.Err = a.applyV3.UserChangePassword(r.AuthUserChangePassword) + case r.AuthUserGrantRole != nil: + op = "AuthUserGrantRole" + ar.Resp, ar.Err = a.applyV3.UserGrantRole(r.AuthUserGrantRole) + case r.AuthUserGet != nil: + op = "AuthUserGet" + ar.Resp, ar.Err = a.applyV3.UserGet(r.AuthUserGet) + case r.AuthUserRevokeRole != nil: + op = "AuthUserRevokeRole" + ar.Resp, ar.Err = a.applyV3.UserRevokeRole(r.AuthUserRevokeRole) + case r.AuthRoleAdd != nil: + op = "AuthRoleAdd" + ar.Resp, ar.Err = a.applyV3.RoleAdd(r.AuthRoleAdd) + case r.AuthRoleGrantPermission != nil: + op = "AuthRoleGrantPermission" + ar.Resp, ar.Err = 
a.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) + case r.AuthRoleGet != nil: + op = "AuthRoleGet" + ar.Resp, ar.Err = a.applyV3.RoleGet(r.AuthRoleGet) + case r.AuthRoleRevokePermission != nil: + op = "AuthRoleRevokePermission" + ar.Resp, ar.Err = a.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) + case r.AuthRoleDelete != nil: + op = "AuthRoleDelete" + ar.Resp, ar.Err = a.applyV3.RoleDelete(r.AuthRoleDelete) + case r.AuthUserList != nil: + op = "AuthUserList" + ar.Resp, ar.Err = a.applyV3.UserList(r.AuthUserList) + case r.AuthRoleList != nil: + op = "AuthRoleList" + ar.Resp, ar.Err = a.applyV3.RoleList(r.AuthRoleList) + default: + a.lg.Panic("not implemented apply", zap.Stringer("raft-request", r)) + } + return ar +} + +func (a *uberApplier) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { + resp, err := a.applyV3.Alarm(ar) + + if ar.Action == pb.AlarmRequest_ACTIVATE || + ar.Action == pb.AlarmRequest_DEACTIVATE { + a.restoreAlarms() + } + return resp, err +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_v2.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_v2.go index 2f5913f18c681..cb48330658aca 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_v2.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_v2.go @@ -16,144 +16,35 @@ package etcdserver import ( "encoding/json" - "fmt" + "net/http" "path" - "strconv" - "time" "github.com/coreos/go-semver/semver" - "go.etcd.io/etcd/pkg/v3/pbutil" + "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/etcdserver/api" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - - "go.uber.org/zap" ) -const v2Version = "v2" - -// ApplierV2 is the interface for processing V2 raft messages -type ApplierV2 interface { - Delete(r *RequestV2) Response - Post(r *RequestV2) Response - Put(r *RequestV2, shouldApplyV3 membership.ShouldApplyV3) Response - QGet(r *RequestV2) Response - Sync(r *RequestV2) Response -} - -func NewApplierV2(lg *zap.Logger, s v2store.Store, c *membership.RaftCluster) ApplierV2 { - if lg == nil { - lg = zap.NewNop() - } - return &applierV2store{lg: lg, store: s, cluster: c} -} - -type applierV2store struct { - lg *zap.Logger - store v2store.Store - cluster *membership.RaftCluster -} - -func (a *applierV2store) Delete(r *RequestV2) Response { - switch { - case r.PrevIndex > 0 || r.PrevValue != "": - return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex)) - default: - return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive)) +func (s *EtcdServer) applyV2Request(r *RequestV2, shouldApplyV3 membership.ShouldApplyV3) { + if r.Method != http.MethodPut || (!storeMemberAttributeRegexp.MatchString(r.Path) && r.Path != membership.StoreClusterVersionKey()) { + s.lg.Panic("detected disallowed v2 WAL for stage --v2-deprecation=write-only", zap.String("method", r.Method)) } -} - -func (a *applierV2store) Post(r *RequestV2) Response { - return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, r.TTLOptions())) -} - -func (a *applierV2store) Put(r *RequestV2, shouldApplyV3 membership.ShouldApplyV3) Response { - ttlOptions := r.TTLOptions() - exists, existsSet := pbutil.GetBool(r.PrevExist) - switch { - case existsSet: - if exists { - if r.PrevIndex == 0 && r.PrevValue == "" { - return toResponse(a.store.Update(r.Path, r.Val, ttlOptions)) - } - return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) + if storeMemberAttributeRegexp.MatchString(r.Path) { + id := 
membership.MustParseMemberIDFromKey(s.lg, path.Dir(r.Path)) + var attr membership.Attributes + if err := json.Unmarshal([]byte(r.Val), &attr); err != nil { + s.lg.Panic("failed to unmarshal", zap.String("value", r.Val), zap.Error(err)) } - return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions)) - case r.PrevIndex > 0 || r.PrevValue != "": - return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) - default: - if storeMemberAttributeRegexp.MatchString(r.Path) { - id := membership.MustParseMemberIDFromKey(a.lg, path.Dir(r.Path)) - var attr membership.Attributes - if err := json.Unmarshal([]byte(r.Val), &attr); err != nil { - a.lg.Panic("failed to unmarshal", zap.String("value", r.Val), zap.Error(err)) - } - if a.cluster != nil { - a.cluster.UpdateAttributes(id, attr, shouldApplyV3) - } - // return an empty response since there is no consumer. - return Response{} + if s.cluster != nil { + s.cluster.UpdateAttributes(id, attr, shouldApplyV3) } - // TODO remove v2 version set to avoid the conflict between v2 and v3 in etcd 3.6 - if r.Path == membership.StoreClusterVersionKey() { - if a.cluster != nil { - // persist to backend given v2store can be very stale - a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability, shouldApplyV3) - } - return Response{} - } - return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions)) - } -} - -func (a *applierV2store) QGet(r *RequestV2) Response { - return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted)) -} - -func (a *applierV2store) Sync(r *RequestV2) Response { - a.store.DeleteExpiredKeys(time.Unix(0, r.Time)) - return Response{} -} - -// applyV2Request interprets r as a call to v2store.X -// and returns a Response interpreted from v2store.Event -func (s *EtcdServer) applyV2Request(r *RequestV2, shouldApplyV3 membership.ShouldApplyV3) (resp Response) { - stringer := panicAlternativeStringer{ - stringer: r, - alternative: func() string { return fmt.Sprintf("id:%d,method:%s,path:%s", r.ID, r.Method, r.Path) }, } - defer func(start time.Time) { - success := resp.Err == nil - applySec.WithLabelValues(v2Version, r.Method, strconv.FormatBool(success)).Observe(time.Since(start).Seconds()) - warnOfExpensiveRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, stringer, nil, nil) - }(time.Now()) - - switch r.Method { - case "POST": - return s.applyV2.Post(r) - case "PUT": - return s.applyV2.Put(r, shouldApplyV3) - case "DELETE": - return s.applyV2.Delete(r) - case "QGET": - return s.applyV2.QGet(r) - case "SYNC": - return s.applyV2.Sync(r) - default: - // This should never be reached, but just in case: - return Response{Err: ErrUnknownMethod} - } -} - -func (r *RequestV2) TTLOptions() v2store.TTLOptionSet { - refresh, _ := pbutil.GetBool(r.Refresh) - ttlOptions := v2store.TTLOptionSet{Refresh: refresh} - if r.Expiration != 0 { - ttlOptions.ExpireTime = time.Unix(0, r.Expiration) + // TODO remove v2 version set to avoid the conflict between v2 and v3 in etcd 3.6 + if r.Path == membership.StoreClusterVersionKey() { + if s.cluster != nil { + // persist to backend given v2store can be very stale + s.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability, shouldApplyV3) + } } - return ttlOptions -} - -func toResponse(ev *v2store.Event, err error) Response { - return Response{Event: ev, Err: err} } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/bootstrap.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/bootstrap.go new file mode 100644 index 
0000000000000..992e26d4f9672 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/bootstrap.go @@ -0,0 +1,713 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/coreos/go-semver/semver" + "github.com/dustin/go-humanize" + "go.uber.org/zap" + + "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/client/pkg/v3/fileutil" + "go.etcd.io/etcd/client/pkg/v3/types" + "go.etcd.io/etcd/pkg/v3/pbutil" + "go.etcd.io/etcd/server/v3/config" + "go.etcd.io/etcd/server/v3/etcdserver/api" + "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" + "go.etcd.io/etcd/server/v3/etcdserver/api/snap" + "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery" + "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" + "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery" + "go.etcd.io/etcd/server/v3/etcdserver/cindex" + servererrors "go.etcd.io/etcd/server/v3/etcdserver/errors" + serverstorage "go.etcd.io/etcd/server/v3/storage" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" + "go.etcd.io/etcd/server/v3/storage/wal" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" + "go.etcd.io/raft/v3" + "go.etcd.io/raft/v3/raftpb" +) + +func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) { + if cfg.MaxRequestBytes > recommendedMaxRequestBytes { + cfg.Logger.Warn( + "exceeded recommended request limit", + zap.Uint("max-request-bytes", cfg.MaxRequestBytes), + zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))), + zap.Int("recommended-request-bytes", recommendedMaxRequestBytes), + zap.String("recommended-request-size", recommendedMaxRequestBytesString), + ) + } + + if terr := fileutil.TouchDirAll(cfg.Logger, cfg.DataDir); terr != nil { + return nil, fmt.Errorf("cannot access data directory: %w", terr) + } + + if terr := fileutil.TouchDirAll(cfg.Logger, cfg.MemberDir()); terr != nil { + return nil, fmt.Errorf("cannot access member directory: %w", terr) + } + ss := bootstrapSnapshot(cfg) + prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout()) + if err != nil { + return nil, err + } + + haveWAL := wal.Exist(cfg.WALDir()) + st := v2store.New(StoreClusterPrefix, StoreKeysPrefix) + backend, err := bootstrapBackend(cfg, haveWAL, st, ss) + if err != nil { + return nil, err + } + var bwal *bootstrappedWAL + + if haveWAL { + if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil { + return nil, fmt.Errorf("cannot write to WAL directory: %w", err) + } + bwal = bootstrapWALFromSnapshot(cfg, backend.snapshot) + } + + cfg.Logger.Info("bootstrapping cluster") + cluster, err := bootstrapCluster(cfg, bwal, prt) + if err != nil { + backend.Close() + return nil, err + } + + cfg.Logger.Info("bootstrapping storage") + s := bootstrapStorage(cfg, st, backend, bwal, cluster) + + if err = 
cluster.Finalize(cfg, s); err != nil { + backend.Close() + return nil, err + } + + cfg.Logger.Info("bootstrapping raft") + raft := bootstrapRaft(cfg, cluster, s.wal) + return &bootstrappedServer{ + prt: prt, + ss: ss, + storage: s, + cluster: cluster, + raft: raft, + }, nil +} + +type bootstrappedServer struct { + storage *bootstrappedStorage + cluster *bootstrappedCluster + raft *bootstrappedRaft + prt http.RoundTripper + ss *snap.Snapshotter +} + +func (s *bootstrappedServer) Close() { + s.storage.Close() +} + +type bootstrappedStorage struct { + backend *bootstrappedBackend + wal *bootstrappedWAL + st v2store.Store +} + +func (s *bootstrappedStorage) Close() { + s.backend.Close() +} + +type bootstrappedBackend struct { + beHooks *serverstorage.BackendHooks + be backend.Backend + ci cindex.ConsistentIndexer + beExist bool + snapshot *raftpb.Snapshot +} + +func (s *bootstrappedBackend) Close() { + s.be.Close() +} + +type bootstrappedCluster struct { + remotes []*membership.Member + cl *membership.RaftCluster + nodeID types.ID +} + +type bootstrappedRaft struct { + lg *zap.Logger + heartbeat time.Duration + + peers []raft.Peer + config *raft.Config + storage *raft.MemoryStorage +} + +func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappedBackend, wal *bootstrappedWAL, cl *bootstrappedCluster) *bootstrappedStorage { + if wal == nil { + wal = bootstrapNewWAL(cfg, cl) + } + + return &bootstrappedStorage{ + backend: be, + st: st, + wal: wal, + } +} + +func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter { + if err := fileutil.TouchDirAll(cfg.Logger, cfg.SnapDir()); err != nil { + cfg.Logger.Fatal( + "failed to create snapshot directory", + zap.String("path", cfg.SnapDir()), + zap.Error(err), + ) + } + + if err := fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool { + return strings.HasPrefix(fileName, "tmp") + }); err != nil { + cfg.Logger.Error( + "failed to remove temp file(s) in snapshot directory", + zap.String("path", cfg.SnapDir()), + zap.Error(err), + ) + } + return snap.New(cfg.Logger, cfg.SnapDir()) +} + +func bootstrapBackend(cfg config.ServerConfig, haveWAL bool, st v2store.Store, ss *snap.Snapshotter) (backend *bootstrappedBackend, err error) { + beExist := fileutil.Exist(cfg.BackendPath()) + ci := cindex.NewConsistentIndex(nil) + beHooks := serverstorage.NewBackendHooks(cfg.Logger, ci) + be := serverstorage.OpenBackend(cfg, beHooks) + defer func() { + if err != nil && be != nil { + be.Close() + } + }() + ci.SetBackend(be) + schema.CreateMetaBucket(be.BatchTx()) + if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 { + err = maybeDefragBackend(cfg, be) + if err != nil { + return nil, err + } + } + cfg.Logger.Debug("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex())) + + // TODO(serathius): Implement schema setup in fresh storage + var snapshot *raftpb.Snapshot + if haveWAL { + snapshot, be, err = recoverSnapshot(cfg, st, be, beExist, beHooks, ci, ss) + if err != nil { + return nil, err + } + } + if beExist { + s1, s2 := be.Size(), be.SizeInUse() + cfg.Logger.Info( + "recovered v3 backend", + zap.Int64("backend-size-bytes", s1), + zap.String("backend-size", humanize.Bytes(uint64(s1))), + zap.Int64("backend-size-in-use-bytes", s2), + zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))), + ) + if err = schema.Validate(cfg.Logger, be.ReadTx()); err != nil { + cfg.Logger.Error("Failed to validate schema", zap.Error(err)) + return nil, err + } + } + + return &bootstrappedBackend{ + beHooks: 
beHooks, + be: be, + ci: ci, + beExist: beExist, + snapshot: snapshot, + }, nil +} + +func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error { + size := be.Size() + sizeInUse := be.SizeInUse() + freeableMemory := uint(size - sizeInUse) + thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024 + if freeableMemory < thresholdBytes { + cfg.Logger.Info("Skipping defragmentation", + zap.Int64("current-db-size-bytes", size), + zap.String("current-db-size", humanize.Bytes(uint64(size))), + zap.Int64("current-db-size-in-use-bytes", sizeInUse), + zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))), + zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes), + zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))), + ) + return nil + } + return be.Defrag() +} + +func bootstrapCluster(cfg config.ServerConfig, bwal *bootstrappedWAL, prt http.RoundTripper) (c *bootstrappedCluster, err error) { + switch { + case bwal == nil && !cfg.NewCluster: + c, err = bootstrapExistingClusterNoWAL(cfg, prt) + case bwal == nil && cfg.NewCluster: + c, err = bootstrapNewClusterNoWAL(cfg, prt) + case bwal != nil && bwal.haveWAL: + c, err = bootstrapClusterWithWAL(cfg, bwal.meta) + default: + return nil, fmt.Errorf("unsupported bootstrap config") + } + if err != nil { + return nil, err + } + return c, nil +} + +func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrappedCluster, error) { + if err := cfg.VerifyJoinExisting(); err != nil { + return nil, err + } + cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)) + if err != nil { + return nil, err + } + existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt) + if gerr != nil { + return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %w", gerr) + } + if err := membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil { + return nil, fmt.Errorf("error validating peerURLs %s: %w", existingCluster, err) + } + if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt, cfg.ReqTimeout()) { + return nil, fmt.Errorf("incompatible with current running cluster") + } + scaleUpLearners := false + if err := membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, existingCluster.Members(), scaleUpLearners); err != nil { + return nil, err + } + remotes := existingCluster.Members() + cl.SetID(types.ID(0), existingCluster.ID()) + member := cl.MemberByName(cfg.Name) + return &bootstrappedCluster{ + remotes: remotes, + cl: cl, + nodeID: member.ID, + }, nil +} + +func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrappedCluster, error) { + if err := cfg.VerifyBootstrap(); err != nil { + return nil, err + } + cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)) + if err != nil { + return nil, err + } + m := cl.MemberByName(cfg.Name) + if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) { + return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) + } + if cfg.ShouldDiscover() { + var str string + if cfg.DiscoveryURL != "" { + cfg.Logger.Warn("V2 discovery is deprecated!") + str, err = v2discovery.JoinCluster(cfg.Logger, 
cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) + } else { + cfg.Logger.Info("Bootstrapping cluster using v3 discovery.") + str, err = v3discovery.JoinCluster(cfg.Logger, &cfg.DiscoveryCfg, m.ID, cfg.InitialPeerURLsMap.String()) + } + if err != nil { + return nil, &servererrors.DiscoveryError{Op: "join", Err: err} + } + var urlsmap types.URLsMap + urlsmap, err = types.NewURLsMap(str) + if err != nil { + return nil, err + } + if config.CheckDuplicateURL(urlsmap) { + return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) + } + if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)); err != nil { + return nil, err + } + } + return &bootstrappedCluster{ + remotes: nil, + cl: cl, + nodeID: m.ID, + }, nil +} + +func bootstrapClusterWithWAL(cfg config.ServerConfig, meta *snapshotMetadata) (*bootstrappedCluster, error) { + if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { + return nil, fmt.Errorf("cannot write to member directory: %w", err) + } + + if cfg.ShouldDiscover() { + cfg.Logger.Warn( + "discovery token is ignored since cluster already initialized; valid logs are found", + zap.String("wal-dir", cfg.WALDir()), + ) + } + cl := membership.NewCluster(cfg.Logger, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)) + + scaleUpLearners := false + if err := membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, cl.Members(), scaleUpLearners); err != nil { + return nil, err + } + + cl.SetID(meta.nodeID, meta.clusterID) + return &bootstrappedCluster{ + cl: cl, + nodeID: meta.nodeID, + }, nil +} + +func recoverSnapshot(cfg config.ServerConfig, st v2store.Store, be backend.Backend, beExist bool, beHooks *serverstorage.BackendHooks, ci cindex.ConsistentIndexer, ss *snap.Snapshotter) (*raftpb.Snapshot, backend.Backend, error) { + // Find a snapshot to start/restart a raft node + walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir()) + if err != nil { + return nil, be, err + } + // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding + // bwal log entries + snapshot, err := ss.LoadNewestAvailable(walSnaps) + if err != nil && !errors.Is(err, snap.ErrNoSnapshot) { + return nil, be, err + } + + if snapshot != nil { + if err = st.Recovery(snapshot.Data); err != nil { + cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err)) + } + + if err = serverstorage.AssertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil { + cfg.Logger.Error("illegal v2store content", zap.Error(err)) + return nil, be, err + } + + cfg.Logger.Info( + "recovered v2 store from snapshot", + zap.Uint64("snapshot-index", snapshot.Metadata.Index), + zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))), + ) + + if be, err = serverstorage.RecoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil { + cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err)) + } + // A snapshot db may have already been recovered, and the old db should have + // already been closed in this case, so we should set the backend again. + ci.SetBackend(be) + + if beExist { + // TODO: remove kvindex != 0 checking when we do not expect users to upgrade + // etcd from pre-3.0 release. 
+ kvindex := ci.ConsistentIndex() + if kvindex < snapshot.Metadata.Index { + if kvindex != 0 { + return nil, be, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index) + } + cfg.Logger.Warn( + "consistent index was never saved", + zap.Uint64("snapshot-index", snapshot.Metadata.Index), + ) + } + } + } else { + cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!") + } + return snapshot, be, nil +} + +func (c *bootstrappedCluster) Finalize(cfg config.ServerConfig, s *bootstrappedStorage) error { + if !s.wal.haveWAL { + c.cl.SetID(c.nodeID, c.cl.ID()) + } + c.cl.SetStore(s.st) + c.cl.SetBackend(schema.NewMembershipBackend(cfg.Logger, s.backend.be)) + if s.wal.haveWAL { + c.cl.Recover(api.UpdateCapability) + if c.databaseFileMissing(s) { + bepath := cfg.BackendPath() + os.RemoveAll(bepath) + return fmt.Errorf("database file (%v) of the backend is missing", bepath) + } + } + scaleUpLearners := false + return membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, c.cl.Members(), scaleUpLearners) +} + +func (c *bootstrappedCluster) databaseFileMissing(s *bootstrappedStorage) bool { + v3Cluster := c.cl.Version() != nil && !c.cl.Version().LessThan(semver.Version{Major: 3}) + return v3Cluster && !s.backend.beExist +} + +func bootstrapRaft(cfg config.ServerConfig, cluster *bootstrappedCluster, bwal *bootstrappedWAL) *bootstrappedRaft { + switch { + case !bwal.haveWAL && !cfg.NewCluster: + return bootstrapRaftFromCluster(cfg, cluster.cl, nil, bwal) + case !bwal.haveWAL && cfg.NewCluster: + return bootstrapRaftFromCluster(cfg, cluster.cl, cluster.cl.MemberIDs(), bwal) + case bwal.haveWAL: + return bootstrapRaftFromWAL(cfg, bwal) + default: + cfg.Logger.Panic("unsupported bootstrap config") + return nil + } +} + +func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID, bwal *bootstrappedWAL) *bootstrappedRaft { + member := cl.MemberByName(cfg.Name) + peers := make([]raft.Peer, len(ids)) + for i, id := range ids { + var ctx []byte + ctx, err := json.Marshal((*cl).Member(id)) + if err != nil { + cfg.Logger.Panic("failed to marshal member", zap.Error(err)) + } + peers[i] = raft.Peer{ID: uint64(id), Context: ctx} + } + cfg.Logger.Info( + "starting local member", + zap.String("local-member-id", member.ID.String()), + zap.String("cluster-id", cl.ID().String()), + ) + s := bwal.MemoryStorage() + return &bootstrappedRaft{ + lg: cfg.Logger, + heartbeat: time.Duration(cfg.TickMs) * time.Millisecond, + config: raftConfig(cfg, uint64(member.ID), s), + peers: peers, + storage: s, + } +} + +func bootstrapRaftFromWAL(cfg config.ServerConfig, bwal *bootstrappedWAL) *bootstrappedRaft { + s := bwal.MemoryStorage() + return &bootstrappedRaft{ + lg: cfg.Logger, + heartbeat: time.Duration(cfg.TickMs) * time.Millisecond, + config: raftConfig(cfg, uint64(bwal.meta.nodeID), s), + storage: s, + } +} + +func raftConfig(cfg config.ServerConfig, id uint64, s *raft.MemoryStorage) *raft.Config { + return &raft.Config{ + ID: id, + ElectionTick: cfg.ElectionTicks, + HeartbeatTick: 1, + Storage: s, + MaxSizePerMsg: maxSizePerMsg, + MaxInflightMsgs: maxInflightMsgs, + CheckQuorum: true, + PreVote: cfg.PreVote, + Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), + } +} + +func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter, wal *wal.WAL, cl *membership.RaftCluster) *raftNode { + var n raft.Node + if len(b.peers) == 0 { + n = raft.RestartNode(b.config) + } else { + n = 
raft.StartNode(b.config, b.peers) + } + raftStatusMu.Lock() + raftStatus = n.Status + raftStatusMu.Unlock() + return newRaftNode( + raftNodeConfig{ + lg: b.lg, + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + heartbeat: b.heartbeat, + raftStorage: b.storage, + storage: serverstorage.NewStorage(b.lg, wal, ss), + }, + ) +} + +func bootstrapWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedWAL { + wal, st, ents, snap, meta := openWALFromSnapshot(cfg, snapshot) + bwal := &bootstrappedWAL{ + lg: cfg.Logger, + w: wal, + st: st, + ents: ents, + snapshot: snap, + meta: meta, + haveWAL: true, + } + + if cfg.ForceNewCluster { + // discard the previously uncommitted entries + bwal.ents = bwal.CommitedEntries() + entries := bwal.NewConfigChangeEntries() + // force commit config change entries + bwal.AppendAndCommitEntries(entries) + cfg.Logger.Info( + "forcing restart member", + zap.String("cluster-id", meta.clusterID.String()), + zap.String("local-member-id", meta.nodeID.String()), + zap.Uint64("commit-index", bwal.st.Commit), + ) + } else { + cfg.Logger.Info( + "restarting local member", + zap.String("cluster-id", meta.clusterID.String()), + zap.String("local-member-id", meta.nodeID.String()), + zap.Uint64("commit-index", bwal.st.Commit), + ) + } + return bwal +} + +// openWALFromSnapshot reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear +// after the position of the given snap in the WAL. +// The snap must have been previously saved to the WAL, or this call will panic. +func openWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (*wal.WAL, *raftpb.HardState, []raftpb.Entry, *raftpb.Snapshot, *snapshotMetadata) { + var walsnap walpb.Snapshot + if snapshot != nil { + walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term + } + repaired := false + for { + w, err := wal.Open(cfg.Logger, cfg.WALDir(), walsnap) + if err != nil { + cfg.Logger.Fatal("failed to open WAL", zap.Error(err)) + } + if cfg.UnsafeNoFsync { + w.SetUnsafeNoFsync() + } + wmetadata, st, ents, err := w.ReadAll() + if err != nil { + w.Close() + // we can only repair ErrUnexpectedEOF and we never repair twice. 
+ if repaired || !errors.Is(err, io.ErrUnexpectedEOF) { + cfg.Logger.Fatal("failed to read WAL, cannot be repaired", zap.Error(err)) + } + if !wal.Repair(cfg.Logger, cfg.WALDir()) { + cfg.Logger.Fatal("failed to repair WAL", zap.Error(err)) + } else { + cfg.Logger.Info("repaired WAL", zap.Error(err)) + repaired = true + } + continue + } + var metadata etcdserverpb.Metadata + pbutil.MustUnmarshal(&metadata, wmetadata) + id := types.ID(metadata.NodeID) + cid := types.ID(metadata.ClusterID) + meta := &snapshotMetadata{clusterID: cid, nodeID: id} + return w, &st, ents, snapshot, meta + } +} + +type snapshotMetadata struct { + nodeID, clusterID types.ID +} + +func bootstrapNewWAL(cfg config.ServerConfig, cl *bootstrappedCluster) *bootstrappedWAL { + metadata := pbutil.MustMarshal( + &etcdserverpb.Metadata{ + NodeID: uint64(cl.nodeID), + ClusterID: uint64(cl.cl.ID()), + }, + ) + w, err := wal.Create(cfg.Logger, cfg.WALDir(), metadata) + if err != nil { + cfg.Logger.Panic("failed to create WAL", zap.Error(err)) + } + if cfg.UnsafeNoFsync { + w.SetUnsafeNoFsync() + } + return &bootstrappedWAL{ + lg: cfg.Logger, + w: w, + } +} + +type bootstrappedWAL struct { + lg *zap.Logger + + haveWAL bool + w *wal.WAL + st *raftpb.HardState + ents []raftpb.Entry + snapshot *raftpb.Snapshot + meta *snapshotMetadata +} + +func (wal *bootstrappedWAL) MemoryStorage() *raft.MemoryStorage { + s := raft.NewMemoryStorage() + if wal.snapshot != nil { + s.ApplySnapshot(*wal.snapshot) + } + if wal.st != nil { + s.SetHardState(*wal.st) + } + if len(wal.ents) != 0 { + s.Append(wal.ents) + } + return s +} + +func (wal *bootstrappedWAL) CommitedEntries() []raftpb.Entry { + for i, ent := range wal.ents { + if ent.Index > wal.st.Commit { + wal.lg.Info( + "discarding uncommitted WAL entries", + zap.Uint64("entry-index", ent.Index), + zap.Uint64("commit-index-from-wal", wal.st.Commit), + zap.Int("number-of-discarded-entries", len(wal.ents)-i), + ) + return wal.ents[:i] + } + } + return wal.ents +} + +func (wal *bootstrappedWAL) NewConfigChangeEntries() []raftpb.Entry { + return serverstorage.CreateConfigChangeEnts( + wal.lg, + serverstorage.GetEffectiveNodeIDsFromWALEntries(wal.lg, wal.snapshot, wal.ents), + uint64(wal.meta.nodeID), + wal.st.Term, + wal.st.Commit, + ) +} + +func (wal *bootstrappedWAL) AppendAndCommitEntries(ents []raftpb.Entry) { + wal.ents = append(wal.ents, ents...) + err := wal.w.Save(raftpb.HardState{}, ents) + if err != nil { + wal.lg.Fatal("failed to save hard state and entries", zap.Error(err)) + } + if len(wal.ents) != 0 { + wal.st.Commit = wal.ents[len(wal.ents)-1].Index + } +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go index ac6ae63742f71..200008ff9b3a0 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go @@ -15,16 +15,14 @@ package cindex import ( - "encoding/binary" "sync" "sync/atomic" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" ) type Backend interface { - BatchTx() backend.BatchTx ReadTx() backend.ReadTx } @@ -48,7 +46,7 @@ type ConsistentIndexer interface { // UnsafeSave must be called holding the lock on the tx. // It saves consistentIndex to the underlying stable storage. 
- UnsafeSave(tx backend.BatchTx) + UnsafeSave(tx backend.UnsafeReadWriter) // SetBackend set the available backend.BatchTx for ConsistentIndexer. SetBackend(be Backend) @@ -68,6 +66,13 @@ type consistentIndex struct { // applyingIndex and applyingTerm are just temporary cache of the raftpb.Entry.Index // and raftpb.Entry.Term, and they are not ready to be persisted yet. They will be // saved to consistentIndex and term above in the txPostLockInsideApplyHook. + // + // TODO(ahrtr): try to remove the OnPreCommitUnsafe, and compare the + // performance difference. Afterwards we can make a decision on whether + // or not we should remove OnPreCommitUnsafe. If it is true, then we + // can remove applyingIndex and applyingTerm, and save the e.Index and + // e.Term to consistentIndex and term directly in applyEntries, and + // persist them into db in the txPostLockInsideApplyHook. applyingIndex uint64 applyingTerm uint64 @@ -90,7 +95,7 @@ func (ci *consistentIndex) ConsistentIndex() uint64 { ci.mutex.Lock() defer ci.mutex.Unlock() - v, term := ReadConsistentIndex(ci.be.ReadTx()) + v, term := schema.ReadConsistentIndex(ci.be.ReadTx()) ci.SetConsistentIndex(v, term) return v } @@ -100,7 +105,7 @@ func (ci *consistentIndex) UnsafeConsistentIndex() uint64 { return index } - v, term := unsafeReadConsistentIndex(ci.be.ReadTx()) + v, term := schema.UnsafeReadConsistentIndex(ci.be.ReadTx()) ci.SetConsistentIndex(v, term) return v } @@ -110,10 +115,10 @@ func (ci *consistentIndex) SetConsistentIndex(v uint64, term uint64) { atomic.StoreUint64(&ci.term, term) } -func (ci *consistentIndex) UnsafeSave(tx backend.BatchTx) { +func (ci *consistentIndex) UnsafeSave(tx backend.UnsafeReadWriter) { index := atomic.LoadUint64(&ci.consistentIndex) term := atomic.LoadUint64(&ci.term) - UnsafeUpdateConsistentIndex(tx, index, term) + schema.UnsafeUpdateConsistentIndex(tx, index, term) } func (ci *consistentIndex) SetBackend(be Backend) { @@ -161,66 +166,11 @@ func (f *fakeConsistentIndex) SetConsistentApplyingIndex(index uint64, term uint atomic.StoreUint64(&f.term, term) } -func (f *fakeConsistentIndex) UnsafeSave(_ backend.BatchTx) {} -func (f *fakeConsistentIndex) SetBackend(_ Backend) {} +func (f *fakeConsistentIndex) UnsafeSave(_ backend.UnsafeReadWriter) {} +func (f *fakeConsistentIndex) SetBackend(_ Backend) {} -// UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exists yet). -func UnsafeCreateMetaBucket(tx backend.BatchTx) { - tx.UnsafeCreateBucket(buckets.Meta) -} - -// CreateMetaBucket creates the `meta` bucket (if it does not exists yet). -func CreateMetaBucket(tx backend.BatchTx) { - tx.LockOutsideApply() - defer tx.Unlock() - tx.UnsafeCreateBucket(buckets.Meta) -} - -// unsafeGetConsistentIndex loads consistent index & term from given transaction. -// returns 0,0 if the data are not found. -// Term is persisted since v3.5. -func unsafeReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) { - _, vs := tx.UnsafeRange(buckets.Meta, buckets.MetaConsistentIndexKeyName, nil, 0) - if len(vs) == 0 { - return 0, 0 - } - v := binary.BigEndian.Uint64(vs[0]) - _, ts := tx.UnsafeRange(buckets.Meta, buckets.MetaTermKeyName, nil, 0) - if len(ts) == 0 { - return v, 0 - } - t := binary.BigEndian.Uint64(ts[0]) - return v, t -} - -// ReadConsistentIndex loads consistent index and term from given transaction. -// returns 0 if the data are not found. 
-func ReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) { - tx.Lock() - defer tx.Unlock() - return unsafeReadConsistentIndex(tx) -} - -func UnsafeUpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64) { - if index == 0 { - // Never save 0 as it means that we didn't loaded the real index yet. - return - } - - bs1 := make([]byte, 8) - binary.BigEndian.PutUint64(bs1, index) - // put the index into the underlying backend - // tx has been locked in TxnBegin, so there is no need to lock it again - tx.UnsafePut(buckets.Meta, buckets.MetaConsistentIndexKeyName, bs1) - if term > 0 { - bs2 := make([]byte, 8) - binary.BigEndian.PutUint64(bs2, term) - tx.UnsafePut(buckets.Meta, buckets.MetaTermKeyName, bs2) - } -} - -func UpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64) { +func UpdateConsistentIndexForce(tx backend.BatchTx, index uint64, term uint64) { tx.LockOutsideApply() defer tx.Unlock() - UnsafeUpdateConsistentIndex(tx, index, term) + schema.UnsafeUpdateConsistentIndexForce(tx, index, term) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go index eace8f69afc4c..12e5e9c2d2473 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go @@ -18,19 +18,21 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "sort" "strconv" "strings" "time" + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" + "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" + "go.etcd.io/etcd/server/v3/etcdserver/errors" ) // isMemberBootstrapped tries to check if the given member has been bootstrapped @@ -83,7 +85,7 @@ func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Durat } continue } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) resp.Body.Close() if err != nil { if logerr { @@ -137,11 +139,11 @@ func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { return us } -// getVersions returns the versions of the members in the given cluster. +// getMembersVersions returns the versions of the members in the given cluster. // The key of the returned map is the member's ID. The value of the returned map // is the semver versions string, including server and cluster. // If it fails to get the version of a member, the key will be nil. 
-func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { +func getMembersVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) map[string]*version.Versions { members := cl.Members() vers := make(map[string]*version.Versions) for _, m := range members { @@ -153,7 +155,7 @@ func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} continue } - ver, err := getVersion(lg, m, rt) + ver, err := getVersion(lg, m, rt, timeout) if err != nil { lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err)) vers[m.ID.String()] = nil @@ -164,44 +166,6 @@ func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt return vers } -// decideClusterVersion decides the cluster version based on the versions map. -// The returned version is the min server version in the map, or nil if the min -// version in unknown. -func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *semver.Version { - var cv *semver.Version - lv := semver.Must(semver.NewVersion(version.Version)) - - for mid, ver := range vers { - if ver == nil { - return nil - } - v, err := semver.NewVersion(ver.Server) - if err != nil { - lg.Warn( - "failed to parse server version of remote member", - zap.String("remote-member-id", mid), - zap.String("remote-member-version", ver.Server), - zap.Error(err), - ) - return nil - } - if lv.LessThan(*v) { - lg.Warn( - "leader found higher-versioned member", - zap.String("local-member-version", lv.String()), - zap.String("remote-member-id", mid), - zap.String("remote-member-version", ver.Server), - ) - } - if cv == nil { - cv = v - } else if v.LessThan(*cv) { - cv = v - } - } - return cv -} - // allowedVersionRange decides the available version range of the cluster that local server can join in; // if the downgrade enabled status is true, the version window is [oneMinorHigher, oneMinorHigher] // if the downgrade is not enabled, the version window is [MinClusterVersion, localVersion] @@ -224,9 +188,9 @@ func allowedVersionRange(downgradeEnabled bool) (minV *semver.Version, maxV *sem // cluster version in the range of [MinV, MaxV] and no known members has a cluster version // out of the range. // We set this rule since when the local member joins, another member might be offline. 
-func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { - vers := getVersions(lg, cl, local, rt) - minV, maxV := allowedVersionRange(getDowngradeEnabledFromRemotePeers(lg, cl, local, rt)) +func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) bool { + vers := getMembersVersions(lg, cl, local, rt, timeout) + minV, maxV := allowedVersionRange(getDowngradeEnabledFromRemotePeers(lg, cl, local, rt, timeout)) return isCompatibleWithVers(lg, vers, local, minV, maxV) } @@ -264,7 +228,7 @@ func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, loc "cluster version of remote member is not compatible; too high", zap.String("remote-member-id", id), zap.String("remote-member-cluster-version", clusterv.String()), - zap.String("minimum-cluster-version-supported", minV.String()), + zap.String("maximum-cluster-version-supported", maxV.String()), ) return false } @@ -275,9 +239,10 @@ func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, loc // getVersion returns the Versions of the given member via its // peerURLs. Returns the last error if it fails to get the version. -func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { +func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper, timeout time.Duration) (*version.Versions, error) { cc := &http.Client{ Transport: rt, + Timeout: timeout, CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse }, @@ -300,7 +265,7 @@ func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*ve continue } var b []byte - b, err = ioutil.ReadAll(resp.Body) + b, err = io.ReadAll(resp.Body) resp.Body.Close() if err != nil { lg.Warn( @@ -335,8 +300,8 @@ func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.R } // TODO: refactor member http handler code // cannot import etcdhttp, so manually construct url - requestUrl := url + "/members/promote/" + fmt.Sprintf("%d", id) - req, err := http.NewRequest("POST", requestUrl, nil) + requestURL := url + "/members/promote/" + fmt.Sprintf("%d", id) + req, err := http.NewRequest(http.MethodPost, requestURL, nil) if err != nil { return nil, err } @@ -346,30 +311,30 @@ func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.R return nil, err } defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return nil, err } if resp.StatusCode == http.StatusRequestTimeout { - return nil, ErrTimeout + return nil, errors.ErrTimeout } if resp.StatusCode == http.StatusPreconditionFailed { // both ErrMemberNotLearner and ErrLearnerNotReady have same http status code - if strings.Contains(string(b), ErrLearnerNotReady.Error()) { - return nil, ErrLearnerNotReady + if strings.Contains(string(b), errors.ErrLearnerNotReady.Error()) { + return nil, errors.ErrLearnerNotReady } if strings.Contains(string(b), membership.ErrMemberNotLearner.Error()) { return nil, membership.ErrMemberNotLearner } - return nil, fmt.Errorf("member promote: unknown error(%s)", string(b)) + return nil, fmt.Errorf("member promote: unknown error(%s)", b) } if resp.StatusCode == http.StatusNotFound { return nil, membership.ErrIDNotFound } if resp.StatusCode != http.StatusOK { // all other types of errors - return nil, fmt.Errorf("member promote: unknown error(%s)", string(b)) + 
return nil, fmt.Errorf("member promote: unknown error(%s)", b) } var membs []*membership.Member @@ -380,14 +345,14 @@ func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.R } // getDowngradeEnabledFromRemotePeers will get the downgrade enabled status of the cluster. -func getDowngradeEnabledFromRemotePeers(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { +func getDowngradeEnabledFromRemotePeers(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) bool { members := cl.Members() for _, m := range members { if m.ID == local { continue } - enable, err := getDowngradeEnabled(lg, m, rt) + enable, err := getDowngradeEnabled(lg, m, rt, timeout) if err != nil { lg.Warn("failed to get downgrade enabled status", zap.String("remote-member-id", m.ID.String()), zap.Error(err)) } else { @@ -401,9 +366,10 @@ func getDowngradeEnabledFromRemotePeers(lg *zap.Logger, cl *membership.RaftClust // getDowngradeEnabled returns the downgrade enabled status of the given member // via its peerURLs. Returns the last error if it fails to get it. -func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (bool, error) { +func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTripper, timeout time.Duration) (bool, error) { cc := &http.Client{ Transport: rt, + Timeout: timeout, CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse }, @@ -426,7 +392,7 @@ func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTrip continue } var b []byte - b, err = ioutil.ReadAll(resp.Body) + b, err = io.ReadAll(resp.Body) resp.Body.Close() if err != nil { lg.Warn( @@ -452,45 +418,27 @@ func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTrip return false, err } -// isMatchedVersions returns true if all server versions are equal to target version, otherwise return false. -// It can be used to decide the whether the cluster finishes downgrading to target version. 
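Another editorial aside: the hunks above thread a timeout parameter down to the peer HTTP clients via http.Client.Timeout, which bounds the entire exchange (dialing, the request, and reading the body), so a slow peer can no longer stall version or downgrade-status probes indefinitely. A hedged sketch of that client construction, with invented names:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// newPeerClient builds a client shaped like the ones in getVersion and
// getDowngradeEnabled above: a caller-supplied transport, a total
// per-request budget, and no redirect chasing between peers.
func newPeerClient(rt http.RoundTripper, timeout time.Duration) *http.Client {
	return &http.Client{
		Transport: rt,
		Timeout:   timeout, // covers connect, request, and body read
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse // surface the redirect instead of following it
		},
	}
}

func main() {
	cc := newPeerClient(http.DefaultTransport, 2*time.Second)
	fmt.Println("peer client timeout:", cc.Timeout)
}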
-func isMatchedVersions(lg *zap.Logger, targetVersion *semver.Version, vers map[string]*version.Versions) bool { - for mid, ver := range vers { - if ver == nil { - return false - } - v, err := semver.NewVersion(ver.Cluster) - if err != nil { - lg.Warn( - "failed to parse server version of remote member", - zap.String("remote-member-id", mid), - zap.String("remote-member-version", ver.Server), - zap.Error(err), - ) - return false - } - if !targetVersion.Equal(*v) { - lg.Warn("remotes server has mismatching etcd version", - zap.String("remote-member-id", mid), - zap.String("current-server-version", v.String()), - zap.String("target-version", targetVersion.String()), - ) - return false - } - } - return true -} - func convertToClusterVersion(v string) (*semver.Version, error) { ver, err := semver.NewVersion(v) if err != nil { // allow input version format Major.Minor ver, err = semver.NewVersion(v + ".0") if err != nil { - return nil, ErrWrongDowngradeVersionFormat + return nil, errors.ErrWrongDowngradeVersionFormat } } // cluster version only keeps major.minor, remove patch version ver = &semver.Version{Major: ver.Major, Minor: ver.Minor} return ver, nil } + +func GetMembershipInfoInV2Format(lg *zap.Logger, cl *membership.RaftCluster) []byte { + var st v2store.Store + st = v2store.New(StoreClusterPrefix, StoreKeysPrefix) + cl.Store(st) + d, err := st.SaveNoCopy() + if err != nil { + lg.Panic("failed to save v2 store", zap.Error(err)) + } + return d +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go index 852b844a86399..5c16ffb7c9ca6 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go @@ -18,22 +18,22 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" - "io/ioutil" + "io" "net/http" "sort" "strings" "sync" "time" + "go.uber.org/zap" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/mvcc" - - "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/storage/mvcc" ) type CorruptionChecker interface { @@ -54,7 +54,7 @@ type corruptionChecker struct { type Hasher interface { mvcc.HashStorage ReqTimeout() time.Duration - MemberId() types.ID + MemberID() types.ID PeerHashByRev(int64) []*peerHashKVResp LinearizableReadNotify(context.Context) error TriggerCorruptAlarm(types.ID) @@ -72,10 +72,6 @@ type hasherAdapter struct { mvcc.HashStorage } -func (h hasherAdapter) MemberId() types.ID { - return h.EtcdServer.ID() -} - func (h hasherAdapter) ReqTimeout() time.Duration { return h.EtcdServer.Cfg.ReqTimeout() } @@ -92,25 +88,24 @@ func (h hasherAdapter) TriggerCorruptAlarm(memberID types.ID) { // before serving any peer/client traffic. Only mismatch when hashes // are different at requested revision, with same compact revision. 
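The InitialCheck hunks just below also switch error matching from a value switch on p.err to errors.Is, so errors that arrive wrapped (for example via fmt.Errorf's %w verb, which the same function now uses for its own failure) still match. A tiny sketch with an invented error value:

package main

import (
	"errors"
	"fmt"
)

var errCompacted = errors.New("required revision has been compacted")

// classify matches via errors.Is, so it still recognizes errCompacted
// after an intermediate caller has wrapped it.
func classify(err error) string {
	switch {
	case errors.Is(err, errCompacted):
		return "local member is behind the peer"
	default:
		return "unexpected error"
	}
}

func main() {
	wrapped := fmt.Errorf("fetch hash: %w", errCompacted)
	fmt.Println(classify(wrapped)) // matches despite the wrapping
	// A value switch (`switch err { case errCompacted: }`) would miss it.
}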
func (cm *corruptionChecker) InitialCheck() error { - cm.lg.Info( "starting initial corruption check", - zap.String("local-member-id", cm.hasher.MemberId().String()), + zap.String("local-member-id", cm.hasher.MemberID().String()), zap.Duration("timeout", cm.hasher.ReqTimeout()), ) - h, rev, err := cm.hasher.HashByRev(0) + h, _, err := cm.hasher.HashByRev(0) if err != nil { - return fmt.Errorf("%s failed to fetch hash (%v)", cm.hasher.MemberId(), err) + return fmt.Errorf("%s failed to fetch hash (%w)", cm.hasher.MemberID(), err) } - peers := cm.hasher.PeerHashByRev(rev) + peers := cm.hasher.PeerHashByRev(h.Revision) mismatch := 0 for _, p := range peers { if p.resp != nil { peerID := types.ID(p.resp.Header.MemberId) fields := []zap.Field{ - zap.String("local-member-id", cm.hasher.MemberId().String()), - zap.Int64("local-member-revision", rev), + zap.String("local-member-id", cm.hasher.MemberID().String()), + zap.Int64("local-member-revision", h.Revision), zap.Int64("local-member-compact-revision", h.CompactRevision), zap.Uint32("local-member-hash", h.Hash), zap.String("remote-peer-id", peerID.String()), @@ -133,33 +128,33 @@ func (cm *corruptionChecker) InitialCheck() error { } if p.err != nil { - switch p.err { - case rpctypes.ErrFutureRev: + switch { + case errors.Is(p.err, rpctypes.ErrFutureRev): cm.lg.Warn( "cannot fetch hash from slow remote peer", - zap.String("local-member-id", cm.hasher.MemberId().String()), - zap.Int64("local-member-revision", rev), + zap.String("local-member-id", cm.hasher.MemberID().String()), + zap.Int64("local-member-revision", h.Revision), zap.Int64("local-member-compact-revision", h.CompactRevision), zap.Uint32("local-member-hash", h.Hash), zap.String("remote-peer-id", p.id.String()), zap.Strings("remote-peer-endpoints", p.eps), zap.Error(err), ) - case rpctypes.ErrCompacted: + case errors.Is(p.err, rpctypes.ErrCompacted): cm.lg.Warn( "cannot fetch hash from remote peer; local member is behind", - zap.String("local-member-id", cm.hasher.MemberId().String()), - zap.Int64("local-member-revision", rev), + zap.String("local-member-id", cm.hasher.MemberID().String()), + zap.Int64("local-member-revision", h.Revision), zap.Int64("local-member-compact-revision", h.CompactRevision), zap.Uint32("local-member-hash", h.Hash), zap.String("remote-peer-id", p.id.String()), zap.Strings("remote-peer-endpoints", p.eps), zap.Error(err), ) - case rpctypes.ErrClusterIdMismatch: + case errors.Is(p.err, rpctypes.ErrClusterIDMismatch): cm.lg.Warn( "cluster ID mismatch", - zap.String("local-member-id", cm.hasher.MemberId().String()), + zap.String("local-member-id", cm.hasher.MemberID().String()), zap.Int64("local-member-revision", h.Revision), zap.Int64("local-member-compact-revision", h.CompactRevision), zap.Uint32("local-member-hash", h.Hash), @@ -171,22 +166,22 @@ func (cm *corruptionChecker) InitialCheck() error { } } if mismatch > 0 { - return fmt.Errorf("%s found data inconsistency with peers", cm.hasher.MemberId()) + return fmt.Errorf("%s found data inconsistency with peers", cm.hasher.MemberID()) } cm.lg.Info( "initial corruption checking passed; no corruption", - zap.String("local-member-id", cm.hasher.MemberId().String()), + zap.String("local-member-id", cm.hasher.MemberID().String()), ) return nil } func (cm *corruptionChecker) PeriodicCheck() error { - h, rev, err := cm.hasher.HashByRev(0) + h, _, err := cm.hasher.HashByRev(0) if err != nil { return err } - peers := cm.hasher.PeerHashByRev(rev) + peers := cm.hasher.PeerHashByRev(h.Revision) ctx, cancel := 
context.WithTimeout(context.Background(), cm.hasher.ReqTimeout()) err = cm.hasher.LinearizableReadNotify(ctx) @@ -206,24 +201,20 @@ return } alarmed = true - // It isn't clear which member's data is corrupted, so we - // intentionally set the memberID as 0. We will identify - // the corrupted members using quorum in 3.6. Please see - // discussion in https://github.com/etcd-io/etcd/pull/14828. - cm.hasher.TriggerCorruptAlarm(types.ID(0)) + cm.hasher.TriggerCorruptAlarm(id) } - if h2.Hash != h.Hash && rev2 == rev && h.CompactRevision == h2.CompactRevision { + if h2.Hash != h.Hash && h2.Revision == h.Revision && h.CompactRevision == h2.CompactRevision { cm.lg.Warn( "found hash mismatch", - zap.Int64("revision-1", rev), + zap.Int64("revision-1", h.Revision), zap.Int64("compact-revision-1", h.CompactRevision), zap.Uint32("hash-1", h.Hash), - zap.Int64("revision-2", rev2), + zap.Int64("revision-2", h2.Revision), zap.Int64("compact-revision-2", h2.CompactRevision), zap.Uint32("hash-2", h2.Hash), ) - mismatch(cm.hasher.MemberId()) + mismatch(cm.hasher.MemberID()) } checkedCount := 0 @@ -272,9 +263,20 @@ return nil } +// CompactHashCheck is based on the fact that 'compactions' are coordinated +// between raft members and performed at the same revision. For each compacted +// revision there is a KV store hash computed and saved for some time. +// +// This method communicates with peers to find a recent common revision across +// members, and raises an alarm if 2 or more members at the same compact revision +// have different hashes. +// +// We might miss the opportunity to perform the check if the compaction is still +// ongoing on one of the members, or a member was unresponsive. In such a situation +// the method still passes without raising an alarm. func (cm *corruptionChecker) CompactHashCheck() { cm.lg.Info("starting compact hash check", - zap.String("local-member-id", cm.hasher.MemberId().String()), + zap.String("local-member-id", cm.hasher.MemberID().String()), zap.Duration("timeout", cm.hasher.ReqTimeout()), ) hashes := cm.uncheckedRevisions() @@ -284,56 +286,132 @@ if len(peers) == 0 { continue } - peersChecked := 0 - for _, p := range peers { - if p.resp == nil || p.resp.CompactRevision != hash.CompactRevision { - continue - } + if cm.checkPeerHashes(hash, peers) { + cm.lg.Info("finished compaction hash check", zap.Int("number-of-hashes-checked", i+1)) + return + } + } + cm.lg.Info("finished compaction hash check", zap.Int("number-of-hashes-checked", len(hashes))) + return +} - // follower's compact revision is leader's old one, then hashes must match - if p.resp.Hash != hash.Hash { - // It isn't clear which member's data is corrupted, so we - // intentionally set the memberID as 0. We will identify - // the corrupted members using quorum in 3.6. Please see - // discussion in https://github.com/etcd-io/etcd/pull/14828. 
- cm.hasher.TriggerCorruptAlarm(types.ID(0)) - cm.lg.Error("failed compaction hash check", - zap.Int64("revision", hash.Revision), - zap.Int64("leader-compact-revision", hash.CompactRevision), - zap.Uint32("leader-hash", hash.Hash), - zap.Int64("follower-compact-revision", p.resp.CompactRevision), - zap.Uint32("follower-hash", p.resp.Hash), - zap.String("follower-peer-id", p.id.String()), - ) - return - } - peersChecked++ - cm.lg.Info("successfully checked hash on follower", - zap.Int64("revision", hash.Revision), - zap.String("peer-id", p.id.String()), - ) +// checkPeerHashes checks the peers' hashes and raises alarms if corruption is detected. +// It returns a bool indicating whether to check the next hash: +// +// true: successfully checked the hash on the whole cluster or raised alarms, so no need to check the next hash +// false: skipped some members, so the next hash needs to be checked +func (cm *corruptionChecker) checkPeerHashes(leaderHash mvcc.KeyValueHash, peers []*peerHashKVResp) bool { + leaderID := cm.hasher.MemberID() + hash2members := map[uint32]types.IDSlice{leaderHash.Hash: {leaderID}} + + peersChecked := 0 + // group all peers by hash + for _, peer := range peers { + skipped := false + reason := "" + + if peer.resp == nil { + skipped = true + reason = "no response" + } else if peer.resp.CompactRevision != leaderHash.CompactRevision { + skipped = true + reason = fmt.Sprintf("the peer's CompactRevision %d doesn't match leader's CompactRevision %d", + peer.resp.CompactRevision, leaderHash.CompactRevision) } - if len(peers) == peersChecked { - cm.lg.Info("successfully checked hash on whole cluster", - zap.Int("number-of-peers-checked", peersChecked), - zap.Int64("revision", hash.Revision), - ) - cm.mux.Lock() - if hash.Revision > cm.latestRevisionChecked { - cm.latestRevisionChecked = hash.Revision + if skipped { + cm.lg.Warn("Skipped peer's hash", zap.Int("number-of-peers", len(peers)), + zap.String("leader-id", leaderID.String()), + zap.String("peer-id", peer.id.String()), + zap.String("reason", reason)) + continue + } + + peersChecked++ + if ids, ok := hash2members[peer.resp.Hash]; !ok { + hash2members[peer.resp.Hash] = []types.ID{peer.id} + } else { + ids = append(ids, peer.id) + hash2members[peer.resp.Hash] = ids + } + } + + // All members have the same CompactRevision and Hash. + if len(hash2members) == 1 { + return cm.handleConsistentHash(leaderHash, peersChecked, len(peers)) + } + + // Detected a hash mismatch. + // The first step is to figure out the majority with the same hash. + memberCnt := len(peers) + 1 + quorum := memberCnt/2 + 1 + quorumExist := false + for k, v := range hash2members { + if len(v) >= quorum { + quorumExist = true + // remove the majority; we might raise alarms for the remaining members. + delete(hash2members, k) + break + } + } + + if !quorumExist { + // If no quorum exists, we don't know which members' data is + // corrupted. In that situation we intentionally set the memberID + // to 0, meaning the alarm affects the whole cluster. + cm.lg.Error("Detected compaction hash mismatch but cannot identify the corrupted members, so intentionally set the memberID as 0", + zap.String("leader-id", leaderID.String()), + zap.Int64("leader-revision", leaderHash.Revision), + zap.Int64("leader-compact-revision", leaderHash.CompactRevision), + zap.Uint32("leader-hash", leaderHash.Hash), + ) + cm.hasher.TriggerCorruptAlarm(0) + } + + // Raise alarms for the remaining members if a quorum is present, + // but always generate an error log for debugging. 
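// Editorial aside, not vendored code: a worked example of the quorum
// arithmetic above and the alarm loop below. With a leader plus 4 peers,
// memberCnt = 5 and quorum = 5/2 + 1 = 3. If the hash groups are
// {H1: [leader, p1, p2], H2: [p3], H3: [p4]}, the H1 majority is deleted
// from hash2members and alarms are raised only for p3 and p4. If they are
// {H1: [leader, p1], H2: [p2, p3]} (one peer skipped), no group reaches 3,
// so quorumExist stays false and a single alarm with memberID 0 marks the
// whole cluster as suspect.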
+ for k, v := range hash2members { + if quorumExist { + for _, pid := range v { + cm.hasher.TriggerCorruptAlarm(pid) } - cm.mux.Unlock() - cm.lg.Info("finished compaction hash check", zap.Int("number-of-hashes-checked", i+1)) - return } - cm.lg.Warn("skipped revision in compaction hash check; was not able to check all peers", + + cm.lg.Error("Detected compaction hash mismatch", + zap.String("leader-id", leaderID.String()), + zap.Int64("leader-revision", leaderHash.Revision), + zap.Int64("leader-compact-revision", leaderHash.CompactRevision), + zap.Uint32("leader-hash", leaderHash.Hash), + zap.Uint32("peer-hash", k), + zap.String("peer-ids", v.String()), + zap.Bool("quorum-exist", quorumExist), + ) + } + + return true +} + +func (cm *corruptionChecker) handleConsistentHash(hash mvcc.KeyValueHash, peersChecked, peerCnt int) bool { + if peersChecked == peerCnt { + cm.lg.Info("successfully checked hash on whole cluster", zap.Int("number-of-peers-checked", peersChecked), - zap.Int("number-of-peers", len(peers)), zap.Int64("revision", hash.Revision), + zap.Int64("compactRevision", hash.CompactRevision), ) + cm.mux.Lock() + if hash.Revision > cm.latestRevisionChecked { + cm.latestRevisionChecked = hash.Revision + } + cm.mux.Unlock() + return true } - cm.lg.Info("finished compaction hash check", zap.Int("number-of-hashes-checked", len(hashes))) - return + cm.lg.Warn("skipped revision in compaction hash check; was not able to check all peers", + zap.Int("number-of-peers-checked", peersChecked), + zap.Int("number-of-peers", peerCnt), + zap.Int64("revision", hash.Revision), + zap.Int64("compactRevision", hash.CompactRevision), + ) + // The only case which needs to check next hash + return false } func (cm *corruptionChecker) uncheckedRevisions() []mvcc.KeyValueHash { @@ -382,7 +460,7 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp { members := s.cluster.Members() peers := make([]peerInfo, 0, len(members)) for _, m := range members { - if m.ID == s.ID() { + if m.ID == s.MemberID() { continue } peers = append(peers, peerInfo{id: m.ID, eps: m.PeerURLs}) @@ -405,8 +483,10 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp { respsLen := len(resps) var lastErr error for _, ep := range p.eps { + var resp *pb.HashKVResponse + ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - resp, lastErr := HashByRev(ctx, s.cluster.ID(), cc, ep, rev) + resp, lastErr = HashByRev(ctx, s.cluster.ID(), cc, ep, rev) cancel() if lastErr == nil { resps = append(resps, &peerHashKVResp{peerInfo: p, resp: resp, err: nil}) @@ -414,7 +494,7 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp { } lg.Warn( "failed hash kv request", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.Int64("requested-revision", rev), zap.String("remote-peer-endpoint", ep), zap.Error(lastErr), @@ -429,40 +509,6 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp { return resps } -type applierV3Corrupt struct { - applierV3 -} - -func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} } - -func (a *applierV3Corrupt) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - return nil, nil, ErrCorrupt -} - -func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) { - return nil, ErrCorrupt -} - -func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p 
*pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - return nil, ErrCorrupt -} - -func (a *applierV3Corrupt) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - return nil, nil, ErrCorrupt -} - -func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { - return nil, nil, nil, ErrCorrupt -} - -func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, ErrCorrupt -} - -func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - return nil, ErrCorrupt -} - const PeerHashKVPath = "/members/hashkv" type hashKVHandler struct { @@ -490,14 +536,14 @@ func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } defer r.Body.Close() - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if err != nil { http.Error(w, "error reading body", http.StatusBadRequest) return } req := &pb.HashKVRequest{} - if err := json.Unmarshal(b, req); err != nil { + if err = json.Unmarshal(b, req); err != nil { h.lg.Warn("failed to unmarshal request", zap.Error(err)) http.Error(w, "error unmarshalling request", http.StatusBadRequest) return @@ -512,7 +558,12 @@ func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } - resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: hash.Hash, CompactRevision: hash.CompactRevision} + resp := &pb.HashKVResponse{ + Header: &pb.ResponseHeader{Revision: rev}, + Hash: hash.Hash, + CompactRevision: hash.CompactRevision, + HashRevision: hash.Revision, + } respBytes, err := json.Marshal(resp) if err != nil { h.lg.Warn("failed to marshal hashKV response", zap.Error(err)) @@ -532,8 +583,8 @@ func HashByRev(ctx context.Context, cid types.ID, cc *http.Client, url string, r if err != nil { return nil, err } - requestUrl := url + PeerHashKVPath - req, err := http.NewRequest(http.MethodGet, requestUrl, bytes.NewReader(hashReqBytes)) + requestURL := url + PeerHashKVPath + req, err := http.NewRequest(http.MethodGet, requestURL, bytes.NewReader(hashReqBytes)) if err != nil { return nil, err } @@ -547,7 +598,7 @@ func HashByRev(ctx context.Context, cid types.ID, cc *http.Client, url string, r return nil, err } defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -561,11 +612,11 @@ func HashByRev(ctx context.Context, cid types.ID, cc *http.Client, url string, r } } else if resp.StatusCode == http.StatusPreconditionFailed { if strings.Contains(string(b), rafthttp.ErrClusterIDMismatch.Error()) { - return nil, rpctypes.ErrClusterIdMismatch + return nil, rpctypes.ErrClusterIDMismatch } } if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("unknown error: %s", string(b)) + return nil, fmt.Errorf("unknown error: %s", b) } hashResp := &pb.HashKVResponse{} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors.go deleted file mode 100644 index f6d77fe72a38b..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "errors" - "fmt" -) - -var ( - ErrUnknownMethod = errors.New("etcdserver: unknown method") - ErrStopped = errors.New("etcdserver: server stopped") - ErrCanceled = errors.New("etcdserver: request cancelled") - ErrTimeout = errors.New("etcdserver: request timed out") - ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") - ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") - ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") - ErrTimeoutWaitAppliedIndex = errors.New("etcdserver: request timed out, waiting for the applied index took too long") - ErrLeaderChanged = errors.New("etcdserver: leader changed") - ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") - ErrLearnerNotReady = errors.New("etcdserver: can only promote a learner member which is in sync with leader") - ErrNoLeader = errors.New("etcdserver: no leader") - ErrNotLeader = errors.New("etcdserver: not leader") - ErrRequestTooLarge = errors.New("etcdserver: request is too large") - ErrNoSpace = errors.New("etcdserver: no space") - ErrTooManyRequests = errors.New("etcdserver: too many requests") - ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") - ErrKeyNotFound = errors.New("etcdserver: key not found") - ErrCorrupt = errors.New("etcdserver: corrupt cluster") - ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee") - ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade") - ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format") - ErrInvalidDowngradeTargetVersion = errors.New("etcdserver: invalid downgrade target version") - ErrDowngradeInProcess = errors.New("etcdserver: cluster has a downgrade job in progress") - ErrNoInflightDowngrade = errors.New("etcdserver: no inflight downgrade job") -) - -type DiscoveryError struct { - Op string - Err error -} - -func (e DiscoveryError) Error() string { - return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err) -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors/errors.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors/errors.go new file mode 100644 index 0000000000000..8de698a1df344 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors/errors.go @@ -0,0 +1,54 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import ( + "errors" + "fmt" +) + +var ( + ErrUnknownMethod = errors.New("etcdserver: unknown method") + ErrStopped = errors.New("etcdserver: server stopped") + ErrCanceled = errors.New("etcdserver: request cancelled") + ErrTimeout = errors.New("etcdserver: request timed out") + ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") + ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") + ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") + ErrTimeoutWaitAppliedIndex = errors.New("etcdserver: request timed out, waiting for the applied index took too long") + ErrLeaderChanged = errors.New("etcdserver: leader changed") + ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") + ErrLearnerNotReady = errors.New("etcdserver: can only promote a learner member which is in sync with leader") + ErrNoLeader = errors.New("etcdserver: no leader") + ErrNotLeader = errors.New("etcdserver: not leader") + ErrRequestTooLarge = errors.New("etcdserver: request is too large") + ErrNoSpace = errors.New("etcdserver: no space") + ErrTooManyRequests = errors.New("etcdserver: too many requests") + ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") + ErrCorrupt = errors.New("etcdserver: corrupt cluster") + ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee") + ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade") + ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format") + ErrKeyNotFound = errors.New("etcdserver: key not found") +) + +type DiscoveryError struct { + Op string + Err error +} + +func (e DiscoveryError) Error() string { + return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go index 06263a9cd299a..4be44a57a6c27 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go @@ -18,11 +18,11 @@ import ( goruntime "runtime" "time" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/pkg/v3/runtime" - "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + + "go.etcd.io/etcd/api/v3/version" + "go.etcd.io/etcd/pkg/v3/runtime" ) var ( @@ -44,12 +44,6 @@ var ( Name: "leader_changes_seen_total", Help: "The number of leader changes seen.", }) - isLearner = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "is_learner", - Help: "Whether or not this member is a learner. 
1 if is, 0 otherwise.", - }) learnerPromoteFailed = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "etcd", Subsystem: "server", @@ -70,12 +64,6 @@ var ( Name: "heartbeat_send_failures_total", Help: "The total number of leader heartbeat send failures (likely overloaded from slow disk).", }) - slowApplies = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "slow_apply_total", - Help: "The total number of slow apply requests (likely overloaded from slow disk).", - }) applySnapshotInProgress = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "etcd", Subsystem: "server", @@ -124,12 +112,7 @@ var ( Name: "lease_expired_total", Help: "The total number of expired leases.", }) - quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "quota_backend_bytes", - Help: "Current backend storage quota size in bytes.", - }) + currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "etcd", Subsystem: "server", @@ -164,17 +147,6 @@ var ( Name: "limit", Help: "The file descriptor limit.", }) - applySec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "apply_duration_seconds", - Help: "The latency distributions of v2 apply called by backend.", - - // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2 - // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec - Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20), - }, - []string{"version", "op", "success"}) ) func init() { @@ -182,7 +154,6 @@ func init() { prometheus.MustRegister(isLeader) prometheus.MustRegister(leaderChanges) prometheus.MustRegister(heartbeatSendFailures) - prometheus.MustRegister(slowApplies) prometheus.MustRegister(applySnapshotInProgress) prometheus.MustRegister(proposalsCommitted) prometheus.MustRegister(proposalsApplied) @@ -191,16 +162,13 @@ func init() { prometheus.MustRegister(slowReadIndex) prometheus.MustRegister(readIndexFailed) prometheus.MustRegister(leaseExpired) - prometheus.MustRegister(quotaBackendBytes) prometheus.MustRegister(currentVersion) prometheus.MustRegister(currentGoVersion) prometheus.MustRegister(serverID) - prometheus.MustRegister(isLearner) prometheus.MustRegister(learnerPromoteSucceed) prometheus.MustRegister(learnerPromoteFailed) prometheus.MustRegister(fdUsed) prometheus.MustRegister(fdLimit) - prometheus.MustRegister(applySec) currentVersion.With(prometheus.Labels{ "server_version": version.Version, diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go index c4bf0259797a5..fd4b5dac33713 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go @@ -15,27 +15,20 @@ package etcdserver import ( - "encoding/json" "expvar" "fmt" "log" - "sort" "sync" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.uber.org/zap" + "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/contention" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/wal" - "go.etcd.io/etcd/server/v3/wal/walpb" - "go.uber.org/zap" + serverstorage "go.etcd.io/etcd/server/v3/storage" + "go.etcd.io/raft/v3" + "go.etcd.io/raft/v3/raftpb" ) 
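The raft.go hunks below rename the apply struct to toApply and re-document its notifyc handshake. A self-contained, deliberately simplified sketch (invented names, not the vendored types) of that handshake between the raft loop and the apply loop:

package main

import "fmt"

// batch stands in for toApply: committed entries plus a channel that is
// signalled once the corresponding raft log writes are durable on disk.
type batch struct {
	entries []string
	notifyc chan struct{}
}

func main() {
	applyc := make(chan batch)

	// raft loop (sketch): hand entries over first, persist, then notify,
	// letting disk writes overlap with the application of earlier batches.
	go func() {
		b := batch{entries: []string{"put k v"}, notifyc: make(chan struct{}, 1)}
		applyc <- b
		// ... save hard state and entries to the WAL here ...
		b.notifyc <- struct{}{} // raft messages are now stable
	}()

	// apply loop (sketch): must read notifyc before assuming stability.
	b := <-applyc
	<-b.notifyc
	fmt.Println("applying:", b.entries)
}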
const ( @@ -59,7 +52,7 @@ ) func init() { - expvar.Publish("raft.status", expvar.Func(func() interface{} { + expvar.Publish("raft.status", expvar.Func(func() any { raftStatusMu.Lock() defer raftStatusMu.Unlock() if raftStatus == nil { @@ -69,15 +62,19 @@ })) } -// apply contains entries, snapshot to be applied. Once -// an apply is consumed, the entries will be persisted to -// to raft storage concurrently; the application must read -// raftDone before assuming the raft messages are stable. -type apply struct { +// toApply contains entries, snapshot to be applied. Once +// a toApply is consumed, the entries will be persisted to +// raft storage concurrently; the application must read +// notifyc before assuming the raft messages are stable. +type toApply struct { entries []raftpb.Entry snapshot raftpb.Snapshot // notifyc synchronizes etcd server applies with the raft node notifyc chan struct{} + // raftAdvancedC notifies EtcdServer.apply that + // 'raftLog.applied' has advanced by r.Advance + // it should be used only when entries contain raftpb.EntryConfChange + raftAdvancedC <-chan struct{} } type raftNode struct { @@ -92,7 +89,7 @@ msgSnapC chan raftpb.Message // a chan to send out apply - applyc chan apply + applyc chan toApply // a chan to send out readState readStateC chan raft.ReadState @@ -113,7 +110,7 @@ isIDRemoved func(id uint64) bool raft.Node raftStorage *raft.MemoryStorage - storage Storage + storage serverstorage.Storage heartbeat time.Duration // for logging // transport specifies the transport to send and receive msgs to members. // Sending messages MUST NOT block. It is okay to drop messages, since @@ -145,7 +142,7 @@ td: contention.NewTimeoutDetector(2 * cfg.heartbeat), readStateC: make(chan raft.ReadState, 1), msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - applyc: make(chan apply), + applyc: make(chan toApply), stopped: make(chan struct{}), done: make(chan struct{}), } @@ -219,29 +216,23 @@ } notifyc := make(chan struct{}, 1) - ap := apply{ - entries: rd.CommittedEntries, - snapshot: rd.Snapshot, - notifyc: notifyc, + raftAdvancedC := make(chan struct{}, 1) + ap := toApply{ + entries: rd.CommittedEntries, + snapshot: rd.Snapshot, + notifyc: notifyc, + raftAdvancedC: raftAdvancedC, } updateCommittedIndex(&ap, rh) - waitWALSync := shouldWaitWALSync(rd) - if waitWALSync { - // gofail: var raftBeforeSaveWaitWalSync struct{} - if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { - r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err)) - } - } - select { case r.applyc <- ap: case <-r.stopped: return } - // the leader can write to its disk in parallel with replicating to the followers and them + // the leader can write to its disk in parallel with replicating to the followers and then // writing to their disks. 
// For more details, check raft thesis 10.2.1 if islead { @@ -259,11 +250,9 @@ func (r *raftNode) start(rh *raftReadyHandler) { // gofail: var raftAfterSaveSnap struct{} } - if !waitWALSync { - // gofail: var raftBeforeSave struct{} - if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { - r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err)) - } + // gofail: var raftBeforeSave struct{} + if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { + r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err)) } if !raft.IsEmptyHardState(rd.HardState) { proposalsCommitted.Set(float64(rd.HardState.Commit)) @@ -295,8 +284,16 @@ func (r *raftNode) start(rh *raftReadyHandler) { r.raftStorage.Append(rd.Entries) + confChanged := false + for _, ent := range rd.CommittedEntries { + if ent.Type == raftpb.EntryConfChange { + confChanged = true + break + } + } + if !islead { - // finish processing incoming messages before we signal raftdone chan + // finish processing incoming messages before we signal notifyc chan msgs := r.processMessages(rd.Messages) // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots @@ -306,17 +303,11 @@ func (r *raftNode) start(rh *raftReadyHandler) { // changes to be applied before sending messages. // Otherwise we might incorrectly count votes (e.g. votes from removed members). // Also slow machine's follower raft-layer could proceed to become the leader - // on its own single-node cluster, before apply-layer applies the config change. + // on its own single-node cluster, before toApply-layer applies the config change. // We simply wait for ALL pending entries to be applied for now. // We might improve this later on if it causes unnecessary long blocking issues. - waitApply := false - for _, ent := range rd.CommittedEntries { - if ent.Type == raftpb.EntryConfChange { - waitApply = true - break - } - } - if waitApply { + + if confChanged { // blocks until 'applyAll' calls 'applyWait.Trigger' // to be in sync with scheduled config-change job // (assume notifyc has cap of 1) @@ -334,7 +325,13 @@ func (r *raftNode) start(rh *raftReadyHandler) { notifyc <- struct{}{} } + // gofail: var raftBeforeAdvance struct{} r.Advance() + + if confChanged { + // notify etcdserver that raft has already been notified or advanced. + raftAdvancedC <- struct{}{} + } case <-r.stopped: return } @@ -342,44 +339,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { }() } -// For a cluster with only one member, the raft may send both the -// unstable entries and committed entries to etcdserver, and there -// may have overlapped log entries between them. -// -// etcd responds to the client once it finishes (actually partially) -// the applying workflow. But when the client receives the response, -// it doesn't mean etcd has already successfully saved the data, -// including BoltDB and WAL, because: -// 1. etcd commits the boltDB transaction periodically instead of on each request; -// 2. etcd saves WAL entries in parallel with applying the committed entries. -// -// Accordingly, it might run into a situation of data loss when the etcd crashes -// immediately after responding to the client and before the boltDB and WAL -// successfully save the data to disk. -// Note that this issue can only happen for clusters with only one member. -// -// For clusters with multiple members, it isn't an issue, because etcd will -// not commit & apply the data before it being replicated to majority members. 
-// When the client receives the response, it means the data must have been applied. -// It further means the data must have been committed. -// Note: for clusters with multiple members, the raft will never send identical -// unstable entries and committed entries to etcdserver. -// -// Refer to https://github.com/etcd-io/etcd/issues/14370. -func shouldWaitWALSync(rd raft.Ready) bool { - if len(rd.CommittedEntries) == 0 || len(rd.Entries) == 0 { - return false - } - - // Check if there is overlap between unstable and committed entries - // assuming that their index and term are only incrementing. - lastCommittedEntry := rd.CommittedEntries[len(rd.CommittedEntries)-1] - firstUnstableEntry := rd.Entries[0] - return lastCommittedEntry.Term > firstUnstableEntry.Term || - (lastCommittedEntry.Term == firstUnstableEntry.Term && lastCommittedEntry.Index >= firstUnstableEntry.Index) -} - -func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { +func updateCommittedIndex(ap *toApply, rh *raftReadyHandler) { var ci uint64 if len(ap.entries) != 0 { ci = ap.entries[len(ap.entries)-1].Index @@ -397,6 +357,7 @@ func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { for i := len(ms) - 1; i >= 0; i-- { if r.isIDRemoved(ms[i].To) { ms[i].To = 0 + continue } if ms[i].Type == raftpb.MsgAppResp { @@ -437,12 +398,19 @@ func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { return ms } -func (r *raftNode) apply() chan apply { +func (r *raftNode) apply() chan toApply { return r.applyc } func (r *raftNode) stop() { - r.stopped <- struct{}{} + select { + case r.stopped <- struct{}{}: + // Not already stopped, so trigger it + case <-r.done: + // Has already been stopped - no need to do anything + return + } + // Block until the stop has been acknowledged by start() <-r.done } @@ -476,271 +444,3 @@ func (r *raftNode) advanceTicks(ticks int) { r.tick() } } - -func startNode(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) { - var err error - member := cl.MemberByName(cfg.Name) - metadata := pbutil.MustMarshal( - &pb.Metadata{ - NodeID: uint64(member.ID), - ClusterID: uint64(cl.ID()), - }, - ) - if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil { - cfg.Logger.Panic("failed to create WAL", zap.Error(err)) - } - if cfg.UnsafeNoFsync { - w.SetUnsafeNoFsync() - } - peers := make([]raft.Peer, len(ids)) - for i, id := range ids { - var ctx []byte - ctx, err = json.Marshal((*cl).Member(id)) - if err != nil { - cfg.Logger.Panic("failed to marshal member", zap.Error(err)) - } - peers[i] = raft.Peer{ID: uint64(id), Context: ctx} - } - id = member.ID - cfg.Logger.Info( - "starting local member", - zap.String("local-member-id", id.String()), - zap.String("cluster-id", cl.ID().String()), - ) - s = raft.NewMemoryStorage() - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - PreVote: cfg.PreVote, - Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), - } - if len(peers) == 0 { - n = raft.RestartNode(c) - } else { - n = raft.StartNode(c, peers) - } - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - return id, n, s, w -} - -func restartNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { - var walsnap walpb.Snapshot - if snapshot != nil { - 
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync) - - cfg.Logger.Info( - "restarting local member", - zap.String("cluster-id", cid.String()), - zap.String("local-member-id", id.String()), - zap.Uint64("commit-index", st.Commit), - ) - cl := membership.NewCluster(cfg.Logger) - cl.SetID(id, cid) - s := raft.NewMemoryStorage() - if snapshot != nil { - s.ApplySnapshot(*snapshot) - } - s.SetHardState(st) - s.Append(ents) - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - PreVote: cfg.PreVote, - Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), - } - - n := raft.RestartNode(c) - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - return id, cl, n, s, w -} - -func restartAsStandaloneNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { - var walsnap walpb.Snapshot - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync) - - // discard the previously uncommitted entries - for i, ent := range ents { - if ent.Index > st.Commit { - cfg.Logger.Info( - "discarding uncommitted WAL entries", - zap.Uint64("entry-index", ent.Index), - zap.Uint64("commit-index-from-wal", st.Commit), - zap.Int("number-of-discarded-entries", len(ents)-i), - ) - ents = ents[:i] - break - } - } - - // force append the configuration change entries - toAppEnts := createConfigChangeEnts( - cfg.Logger, - getIDs(cfg.Logger, snapshot, ents), - uint64(id), - st.Term, - st.Commit, - ) - ents = append(ents, toAppEnts...) - - // force commit newly appended entries - err := w.Save(raftpb.HardState{}, toAppEnts) - if err != nil { - cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err)) - } - if len(ents) != 0 { - st.Commit = ents[len(ents)-1].Index - } - - cfg.Logger.Info( - "forcing restart member", - zap.String("cluster-id", cid.String()), - zap.String("local-member-id", id.String()), - zap.Uint64("commit-index", st.Commit), - ) - - cl := membership.NewCluster(cfg.Logger) - cl.SetID(id, cid) - s := raft.NewMemoryStorage() - if snapshot != nil { - s.ApplySnapshot(*snapshot) - } - s.SetHardState(st) - s.Append(ents) - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - PreVote: cfg.PreVote, - Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), - } - - n := raft.RestartNode(c) - raftStatus = n.Status - return id, cl, n, s, w -} - -// getIDs returns an ordered set of IDs included in the given snapshot and -// the entries. The given snapshot/entries can contain three kinds of -// ID-related entry: -// - ConfChangeAddNode, in which case the contained ID will be added into the set. -// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set. -// - ConfChangeAddLearnerNode, in which the contained ID will be added into the set. 
-func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { - ids := make(map[uint64]bool) - if snap != nil { - for _, id := range snap.Metadata.ConfState.Voters { - ids[id] = true - } - } - for _, e := range ents { - if e.Type != raftpb.EntryConfChange { - continue - } - var cc raftpb.ConfChange - pbutil.MustUnmarshal(&cc, e.Data) - switch cc.Type { - case raftpb.ConfChangeAddLearnerNode: - ids[cc.NodeID] = true - case raftpb.ConfChangeAddNode: - ids[cc.NodeID] = true - case raftpb.ConfChangeRemoveNode: - delete(ids, cc.NodeID) - case raftpb.ConfChangeUpdateNode: - // do nothing - default: - lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String())) - } - } - sids := make(types.Uint64Slice, 0, len(ids)) - for id := range ids { - sids = append(sids, id) - } - sort.Sort(sids) - return []uint64(sids) -} - -// createConfigChangeEnts creates a series of Raft entries (i.e. -// EntryConfChange) to remove the set of given IDs from the cluster. The ID -// `self` is _not_ removed, even if present in the set. -// If `self` is not inside the given ids, it creates a Raft entry to add a -// default member with the given `self`. -func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry { - found := false - for _, id := range ids { - if id == self { - found = true - } - } - - var ents []raftpb.Entry - next := index + 1 - - // NB: always add self first, then remove other nodes. Raft will panic if the - // set of voters ever becomes empty. - if !found { - m := membership.Member{ - ID: types.ID(self), - RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, - } - ctx, err := json.Marshal(m) - if err != nil { - lg.Panic("failed to marshal member", zap.Error(err)) - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: self, - Context: ctx, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - next++ - } - - for _, id := range ids { - if id == self { - continue - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - next++ - } - - return ents -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go index 2655d47d67049..3e5c6b619d2f9 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go @@ -17,16 +17,14 @@ package etcdserver import ( "context" "encoding/json" + errorspkg "errors" "expvar" "fmt" "math" - "math/rand" "net/http" - "os" "path" "regexp" "strconv" - "strings" "sync" "sync/atomic" "time" @@ -34,8 +32,6 @@ import ( "github.com/coreos/go-semver/semver" humanize "github.com/dustin/go-humanize" "github.com/prometheus/client_golang/prometheus" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/wal/walpb" "go.uber.org/zap" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -43,35 +39,43 @@ import ( "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/fileutil" "go.etcd.io/etcd/client/pkg/v3/types" + "go.etcd.io/etcd/client/pkg/v3/verify" + "go.etcd.io/etcd/pkg/v3/featuregate" "go.etcd.io/etcd/pkg/v3/idutil" + "go.etcd.io/etcd/pkg/v3/notify" "go.etcd.io/etcd/pkg/v3/pbutil" "go.etcd.io/etcd/pkg/v3/runtime" "go.etcd.io/etcd/pkg/v3/schedule" 
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/wait" - "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/auth" + "go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/etcdserver/api" + httptypes "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" "go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor" + "go.etcd.io/etcd/server/v3/etcdserver/apply" "go.etcd.io/etcd/server/v3/etcdserver/cindex" + "go.etcd.io/etcd/server/v3/etcdserver/errors" + "go.etcd.io/etcd/server/v3/etcdserver/txn" + serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease/leasehttp" - "go.etcd.io/etcd/server/v3/mvcc" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/wal" + serverstorage "go.etcd.io/etcd/server/v3/storage" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/mvcc" + "go.etcd.io/etcd/server/v3/storage/schema" + "go.etcd.io/raft/v3" + "go.etcd.io/raft/v3/raftpb" ) const ( - DefaultSnapshotCount = 100000 + DefaultSnapshotCount = 10000 // DefaultSnapshotCatchUpEntries is the number of entries for a slow follower // to catch-up after compacting the raft storage entries. @@ -84,7 +88,7 @@ const ( StoreKeysPrefix = "/1" // HealthInterval is the minimum time the cluster should be healthy - // before accepting add member requests. + // before accepting add and delete member requests. HealthInterval = 5 * time.Second purgeFileInterval = 30 * time.Second @@ -100,7 +104,9 @@ const ( recommendedMaxRequestBytes = 10 * 1024 * 1024 - readyPercent = 0.9 + // readyPercentThreshold is a threshold used to determine + // whether a learner is ready for a transition into a full voting member or not. + readyPercentThreshold = 0.9 DowngradeEnabledPath = "/downgrade/enabled" ) @@ -116,12 +122,10 @@ var ( ) func init() { - rand.Seed(time.Now().UnixNano()) - expvar.Publish( "file_descriptor_limit", expvar.Func( - func() interface{} { + func() any { n, _ := runtime.FDLimit() return n }, @@ -141,15 +145,12 @@ type ServerV2 interface { Server Leader() types.ID - // Do takes a V2 request and attempts to fulfill it, returning a Response. - Do(ctx context.Context, r pb.Request) (Response, error) - stats.Stats ClientCertAuthEnabled() bool } type ServerV3 interface { Server - RaftStatusGetter + apply.RaftStatusGetter } func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled } @@ -187,6 +188,9 @@ type Server interface { // the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since // this feature is introduced post 2.0. ClusterVersion() *semver.Version + // StorageVersion is the storage schema version. It's supported starting + // from 3.6. + StorageVersion() *semver.Version Cluster() api.Cluster Alarms() []*pb.AlarmMember @@ -235,11 +239,10 @@ type EtcdServer struct { // done is closed when all goroutines from start() complete. done chan struct{} // leaderChanged is used to notify the linearizable read loop to drop the old read requests. 
- leaderChanged chan struct{} - leaderChangedMu sync.RWMutex + leaderChanged *notify.Notifier errorc chan error - id types.ID + memberID types.ID attributes membership.Attributes cluster *membership.RaftCluster @@ -247,21 +250,15 @@ v2store v2store.Store snapshotter *snap.Snapshotter - applyV2 ApplierV2 + uberApply apply.UberApplier - // applyV3 is the applier with auth and quotas - applyV3 applierV3 - // applyV3Base is the core applier without auth or quotas - applyV3Base applierV3 - // applyV3Internal is the applier for internal request - applyV3Internal applierV3Internal - applyWait wait.WaitTime + applyWait wait.WaitTime kv mvcc.WatchableKV lessor lease.Lessor - bemu sync.Mutex + bemu sync.RWMutex be backend.Backend - beHooks *backendHooks + beHooks *serverstorage.BackendHooks authStore auth.AuthStore alarmStore *v3alarm.AlarmStore @@ -290,310 +287,63 @@ leadTimeMu sync.RWMutex leadElectedTime time.Time - firstCommitInTermMu sync.RWMutex - firstCommitInTermC chan struct{} + firstCommitInTerm *notify.Notifier + clusterVersionChanged *notify.Notifier *AccessController + // forceSnapshot can force a snapshot to be triggered after apply, independent of the snapshotCount. + // Should only be set within the apply code path. Used to force a snapshot after a cluster version downgrade. + forceSnapshot bool corruptionChecker CorruptionChecker } -type backendHooks struct { - indexer cindex.ConsistentIndexer - lg *zap.Logger - - // confState to be written in the next submitted backend transaction (if dirty) - confState raftpb.ConfState - // first write changes it to 'dirty'. false by default, so - // not initialized `confState` is meaningless. - confStateDirty bool - confStateLock sync.Mutex -} - -func (bh *backendHooks) OnPreCommitUnsafe(tx backend.BatchTx) { - bh.indexer.UnsafeSave(tx) - bh.confStateLock.Lock() - defer bh.confStateLock.Unlock() - if bh.confStateDirty { - membership.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState) - // save bh.confState - bh.confStateDirty = false - } -} - -func (bh *backendHooks) SetConfState(confState *raftpb.ConfState) { - bh.confStateLock.Lock() - defer bh.confStateLock.Unlock() - bh.confState = *confState - bh.confStateDirty = true -} - // NewServer creates a new EtcdServer from the supplied configuration. The // configuration is considered static for the lifetime of the EtcdServer. 
func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { - st := v2store.New(StoreClusterPrefix, StoreKeysPrefix) - - var ( - w *wal.WAL - n raft.Node - s *raft.MemoryStorage - id types.ID - cl *membership.RaftCluster - ) - - if cfg.MaxRequestBytes > recommendedMaxRequestBytes { - cfg.Logger.Warn( - "exceeded recommended request limit", - zap.Uint("max-request-bytes", cfg.MaxRequestBytes), - zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))), - zap.Int("recommended-request-bytes", recommendedMaxRequestBytes), - zap.String("recommended-request-size", recommendedMaxRequestBytesString), - ) - } - - if terr := fileutil.TouchDirAll(cfg.Logger, cfg.DataDir); terr != nil { - return nil, fmt.Errorf("cannot access data directory: %v", terr) - } - - haveWAL := wal.Exist(cfg.WALDir()) - - if err = fileutil.TouchDirAll(cfg.Logger, cfg.SnapDir()); err != nil { - cfg.Logger.Fatal( - "failed to create snapshot directory", - zap.String("path", cfg.SnapDir()), - zap.Error(err), - ) - } - - if err = fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool { - return strings.HasPrefix(fileName, "tmp") - }); err != nil { - cfg.Logger.Error( - "failed to remove temp file(s) in snapshot directory", - zap.String("path", cfg.SnapDir()), - zap.Error(err), - ) - } - - ss := snap.New(cfg.Logger, cfg.SnapDir()) - - bepath := cfg.BackendPath() - beExist := fileutil.Exist(bepath) - - ci := cindex.NewConsistentIndex(nil) - beHooks := &backendHooks{lg: cfg.Logger, indexer: ci} - be := openBackend(cfg, beHooks) - ci.SetBackend(be) - cindex.CreateMetaBucket(be.BatchTx()) - - if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 { - err := maybeDefragBackend(cfg, be) - if err != nil { - return nil, err - } - } - - defer func() { - if be != nil && err != nil { - be.Close() - } - }() - - prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout()) + b, err := bootstrap(cfg) if err != nil { + cfg.Logger.Error("bootstrap failed", zap.Error(err)) return nil, err } - var ( - remotes []*membership.Member - snapshot *raftpb.Snapshot - ) + cfg.Logger.Info("bootstrap successfully") - switch { - case !haveWAL && !cfg.NewCluster: - if err = cfg.VerifyJoinExisting(); err != nil { - return nil, err - } - cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap) - if err != nil { - return nil, err - } - existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt) - if gerr != nil { - return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) - } - if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil { - return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) - } - if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) { - return nil, fmt.Errorf("incompatible with current running cluster") - } - - remotes = existingCluster.Members() - cl.SetID(types.ID(0), existingCluster.ID()) - cl.SetStore(st) - cl.SetBackend(be) - id, n, s, w = startNode(cfg, cl, nil) - cl.SetID(id, existingCluster.ID()) - - case !haveWAL && cfg.NewCluster: - if err = cfg.VerifyBootstrap(); err != nil { - return nil, err - } - cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap) - if err != nil { - return nil, err - } - m := cl.MemberByName(cfg.Name) - if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) { - return 
nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) - } - if cfg.ShouldDiscover() { - var str string - str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) - if err != nil { - return nil, &DiscoveryError{Op: "join", Err: err} - } - var urlsmap types.URLsMap - urlsmap, err = types.NewURLsMap(str) - if err != nil { - return nil, err - } - if config.CheckDuplicateURL(urlsmap) { - return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) - } - if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil { - return nil, err - } - } - cl.SetStore(st) - cl.SetBackend(be) - id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) - cl.SetID(id, cl.ID()) - - case haveWAL: - if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { - return nil, fmt.Errorf("cannot write to member directory: %v", err) - } - - if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil { - return nil, fmt.Errorf("cannot write to WAL directory: %v", err) - } - - if cfg.ShouldDiscover() { - cfg.Logger.Warn( - "discovery token is ignored since cluster already initialized; valid logs are found", - zap.String("wal-dir", cfg.WALDir()), - ) - } - - // Find a snapshot to start/restart a raft node - var walSnaps []walpb.Snapshot - walSnaps, err = wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir()) + defer func() { if err != nil { - return nil, err - } - // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding - // wal log entries - snapshot, err = ss.LoadNewestAvailable(walSnaps) - if err != nil && err != snap.ErrNoSnapshot { - return nil, err - } - - if snapshot != nil { - if err = st.Recovery(snapshot.Data); err != nil { - cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err)) - } - - if err = assertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil { - cfg.Logger.Error("illegal v2store content", zap.Error(err)) - return nil, err - } - - cfg.Logger.Info( - "recovered v2 store from snapshot", - zap.Uint64("snapshot-index", snapshot.Metadata.Index), - zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))), - ) - - if be, err = recoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil { - cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err)) - } - // A snapshot db may have already been recovered, and the old db should have - // already been closed in this case, so we should set the backend again. - ci.SetBackend(be) - s1, s2 := be.Size(), be.SizeInUse() - cfg.Logger.Info( - "recovered v3 backend from snapshot", - zap.Int64("backend-size-bytes", s1), - zap.String("backend-size", humanize.Bytes(uint64(s1))), - zap.Int64("backend-size-in-use-bytes", s2), - zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))), - ) - } else { - cfg.Logger.Info("No snapshot found. 
Recovering WAL from scratch!") - } - - if !cfg.ForceNewCluster { - id, cl, n, s, w = restartNode(cfg, snapshot) - } else { - id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot) + b.Close() } + }() - cl.SetStore(st) - cl.SetBackend(be) - cl.Recover(api.UpdateCapability) - if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist { - os.RemoveAll(bepath) - return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath) - } - - default: - return nil, fmt.Errorf("unsupported bootstrap config") - } - - if terr := fileutil.TouchDirAll(cfg.Logger, cfg.MemberDir()); terr != nil { - return nil, fmt.Errorf("cannot access member directory: %v", terr) - } - - sstats := stats.NewServerStats(cfg.Name, id.String()) - lstats := stats.NewLeaderStats(cfg.Logger, id.String()) + sstats := stats.NewServerStats(cfg.Name, b.cluster.cl.String()) + lstats := stats.NewLeaderStats(cfg.Logger, b.cluster.nodeID.String()) heartbeat := time.Duration(cfg.TickMs) * time.Millisecond srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: cfg, - lgMu: new(sync.RWMutex), - lg: cfg.Logger, - errorc: make(chan error, 1), - v2store: st, - snapshotter: ss, - r: *newRaftNode( - raftNodeConfig{ - lg: cfg.Logger, - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - heartbeat: heartbeat, - raftStorage: s, - storage: NewStorage(w, ss), - }, - ), - id: id, - attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, - cluster: cl, - stats: sstats, - lstats: lstats, - SyncTicker: time.NewTicker(500 * time.Millisecond), - peerRt: prt, - reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), - AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist}, - consistIndex: ci, - firstCommitInTermC: make(chan struct{}), - } - serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1) - - srv.applyV2 = NewApplierV2(cfg.Logger, srv.v2store, srv.cluster) - - srv.be = be - srv.beHooks = beHooks + readych: make(chan struct{}), + Cfg: cfg, + lgMu: new(sync.RWMutex), + lg: cfg.Logger, + errorc: make(chan error, 1), + v2store: b.storage.st, + snapshotter: b.ss, + r: *b.raft.newRaftNode(b.ss, b.storage.wal.w, b.cluster.cl), + memberID: b.cluster.nodeID, + attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, + cluster: b.cluster.cl, + stats: sstats, + lstats: lstats, + SyncTicker: time.NewTicker(500 * time.Millisecond), + peerRt: b.prt, + reqIDGen: idutil.NewGenerator(uint16(b.cluster.nodeID), time.Now()), + AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist}, + consistIndex: b.storage.backend.ci, + firstCommitInTerm: notify.NewNotifier(), + clusterVersionChanged: notify.NewNotifier(), + } + serverID.With(prometheus.Labels{"server_id": b.cluster.nodeID.String()}).Set(1) + srv.cluster.SetVersionChangedNotifier(srv.clusterVersionChanged) + + srv.be = b.storage.backend.be + srv.beHooks = b.storage.backend.beHooks minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. 
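Editor's note: the struct changes above collapse each hand-rolled pair of a broadcast channel plus guarding mutex (leaderChanged/leaderChangedMu, firstCommitInTermC/firstCommitInTermMu) into a single *notify.Notifier. As a rough sketch — assumed names, not the actual pkg/notify source — such a notifier just encapsulates the close-and-recreate idiom that the deleted run() code further down still spells out by hand:

package notify

import "sync"

// Notifier sketches a one-to-many broadcast primitive: every channel
// handed out by Receive is closed on the next Notify call.
type Notifier struct {
	mu sync.RWMutex
	ch chan struct{}
}

func NewNotifier() *Notifier {
	return &Notifier{ch: make(chan struct{})}
}

// Receive returns the channel that the next Notify will close.
func (n *Notifier) Receive() <-chan struct{} {
	n.mu.RLock()
	defer n.mu.RUnlock()
	return n.ch
}

// Notify wakes all current receivers by closing the armed channel,
// then arms a fresh one for future receivers.
func (n *Notifier) Notify() {
	n.mu.Lock()
	defer n.mu.Unlock()
	close(n.ch)
	n.ch = make(chan struct{})
}

Centralizing the idiom removes several near-identical mutex/channel dances from server.go: leader changes, first commit in term, and the new cluster-version-changed notifications all share it.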
@@ -621,26 +371,9 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { CompactionSleepInterval: cfg.CompactionSleepInterval, } srv.kv = mvcc.New(srv.Logger(), srv.be, srv.lessor, mvccStoreConfig) - - kvindex := ci.ConsistentIndex() - srv.lg.Debug("restore consistentIndex", zap.Uint64("index", kvindex)) - - if beExist { - // TODO: remove kvindex != 0 checking when we do not expect users to upgrade - // etcd from pre-3.0 release. - if snapshot != nil && kvindex < snapshot.Metadata.Index { - if kvindex != 0 { - return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", bepath, kvindex, snapshot.Metadata.Index) - } - cfg.Logger.Warn( - "consistent index was never saved", - zap.Uint64("snapshot-index", snapshot.Metadata.Index), - ) - } - } srv.corruptionChecker = newCorruptionChecker(cfg.Logger, srv, srv.kv.HashStorage()) - srv.authStore = auth.NewAuthStore(srv.Logger(), srv.be, tp, int(cfg.BcryptCost)) + srv.authStore = auth.NewAuthStore(srv.Logger(), schema.NewAuthBackend(srv.Logger(), srv.be), tp, int(cfg.BcryptCost)) newSrv := srv // since srv == nil in defer if srv is returned as nil defer func() { @@ -658,18 +391,17 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { srv.compactor.Run() } - srv.applyV3Base = srv.newApplierV3Backend() - srv.applyV3Internal = srv.newApplierV3Internal() if err = srv.restoreAlarms(); err != nil { return nil, err } + srv.uberApply = srv.NewUberApplier() if srv.Cfg.EnableLeaseCheckpoint { // setting checkpointer enables lease checkpoint feature. srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) error { if !srv.ensureLeadership() { srv.lg.Warn("Ignore the checkpoint request because current member isn't a leader", - zap.Uint64("local-member-id", uint64(srv.ID()))) + zap.Uint64("local-member-id", uint64(srv.MemberID()))) return lease.ErrNotPrimary } @@ -687,11 +419,11 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { Logger: cfg.Logger, TLSInfo: cfg.PeerTLSInfo, DialTimeout: cfg.PeerDialTimeout(), - ID: id, + ID: b.cluster.nodeID, URLs: cfg.PeerURLs, - ClusterID: cl.ID(), + ClusterID: b.cluster.cl.ID(), Raft: srv, - Snapshotter: ss, + Snapshotter: b.ss, ServerStats: sstats, LeaderStats: lstats, ErrorC: srv.errorc, @@ -700,13 +432,13 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { return nil, err } // add all remotes into transport - for _, m := range remotes { - if m.ID != id { + for _, m := range b.cluster.remotes { + if m.ID != b.cluster.nodeID { tr.AddRemote(m.ID, m.PeerURLs) } } - for _, m := range cl.Members() { - if m.ID != id { + for _, m := range b.cluster.cl.Members() { + if m.ID != b.cluster.nodeID { tr.AddPeer(m.ID, m.PeerURLs) } } @@ -715,23 +447,6 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { return srv, nil } -// assertNoV2StoreContent -> depending on the deprecation stage, warns or report an error -// if the v2store contains custom content. -func assertNoV2StoreContent(lg *zap.Logger, st v2store.Store, deprecationStage config.V2DeprecationEnum) error { - metaOnly, err := membership.IsMetaStoreOnly(st) - if err != nil { - return err - } - if metaOnly { - return nil - } - if deprecationStage.IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) { - return fmt.Errorf("detected disallowed custom content in v2store for stage --v2-deprecation=%s", deprecationStage) - } - lg.Warn("detected custom v2store content. Etcd v3.5 is the last version allowing to access it using API v2. 
Please remove the content.") - return nil -} - func (s *EtcdServer) Logger() *zap.Logger { s.lgMu.RLock() l := s.lg @@ -743,6 +458,11 @@ func (s *EtcdServer) Config() config.ServerConfig { return s.Cfg } +// FeatureEnabled returns true if the feature is enabled by the etcd server, false otherwise. +func (s *EtcdServer) FeatureEnabled(f featuregate.Feature) bool { + return s.Cfg.ServerFeatureGate.Enabled(f) +} + func tickToDur(ticks int, tickMs uint) string { return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond) } @@ -756,7 +476,7 @@ func (s *EtcdServer) adjustTicks() { ticks := s.Cfg.ElectionTicks - 1 lg.Info( "started as single-node; fast-forwarding election ticks", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.Int("forward-ticks", ticks), zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)), zap.Int("election-ticks", s.Cfg.ElectionTicks), @@ -795,7 +515,7 @@ func (s *EtcdServer) adjustTicks() { lg.Info( "initialized peer connections; fast-forwarding election ticks", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.Int("forward-ticks", ticks), zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)), zap.Int("election-ticks", s.Cfg.ElectionTicks), @@ -816,12 +536,11 @@ func (s *EtcdServer) adjustTicks() { func (s *EtcdServer) Start() { s.start() s.GoAttach(func() { s.adjustTicks() }) - // TODO: Switch to publishV3 in 3.6. - // Support for cluster_member_set_attr was added in 3.5. - s.GoAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) + s.GoAttach(func() { s.publishV3(s.Cfg.ReqTimeout()) }) s.GoAttach(s.purgeFile) s.GoAttach(func() { monitorFileDescriptor(s.Logger(), s.stopping) }) - s.GoAttach(s.monitorVersions) + s.GoAttach(s.monitorClusterVersions) + s.GoAttach(s.monitorStorageVersion) s.GoAttach(s.linearizableReadLoop) s.GoAttach(s.monitorKVHash) s.GoAttach(s.monitorCompactHash) @@ -859,11 +578,11 @@ func (s *EtcdServer) start() { s.ctx, s.cancel = context.WithCancel(context.Background()) s.readwaitc = make(chan struct{}, 1) s.readNotifier = newNotifier() - s.leaderChanged = make(chan struct{}) + s.leaderChanged = notify.NewNotifier() if s.ClusterVersion() != nil { lg.Info( "starting etcd server", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("local-server-version", version.Version), zap.String("cluster-id", s.Cluster().ID().String()), zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())), @@ -872,7 +591,7 @@ func (s *EtcdServer) start() { } else { lg.Info( "starting etcd server", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("local-server-version", version.Version), zap.String("cluster-version", "to_be_decided"), ) @@ -941,7 +660,7 @@ type ServerPeerV2 interface { DowngradeEnabledHandler() http.Handler } -func (s *EtcdServer) DowngradeInfo() *membership.DowngradeInfo { return s.cluster.DowngradeInfo() } +func (s *EtcdServer) DowngradeInfo() *serverversion.DowngradeInfo { return s.cluster.DowngradeInfo() } type downgradeEnabledHandler struct { lg *zap.Logger @@ -992,15 +711,15 @@ func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { if s.cluster.IsIDRemoved(types.ID(m.From)) { lg.Warn( "rejected Raft message from removed member", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), 
zap.String("removed-member-id", types.ID(m.From).String()), ) return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") } - if s.ID() != types.ID(m.To) { + if s.MemberID() != types.ID(m.To) { lg.Warn( "rejected Raft message to mismatch member", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("mismatch-member-id", types.ID(m.To).String()), ) return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message to mismatch member") @@ -1030,7 +749,7 @@ type etcdProgress struct { // raftReadyHandler contains a set of EtcdServer operations to be called by raftNode, // and helps decouple state machine logic from Raft algorithms. -// TODO: add a state machine interface to apply the commit entries and do snapshot/recover +// TODO: add a state machine interface to toApply the commit entries and do snapshot/recover type raftReadyHandler struct { getLead func() (lead uint64) updateLead func(lead uint64) @@ -1046,24 +765,9 @@ func (s *EtcdServer) run() { lg.Panic("failed to get snapshot from Raft storage", zap.Error(err)) } - // asynchronously accept apply packets, dispatch progress in-order - sched := schedule.NewFIFOScheduler() + // asynchronously accept toApply packets, dispatch progress in-order + sched := schedule.NewFIFOScheduler(lg) - var ( - smu sync.RWMutex - syncC <-chan time.Time - ) - setSyncC := func(ch <-chan time.Time) { - smu.Lock() - syncC = ch - smu.Unlock() - } - getSyncC := func() (ch <-chan time.Time) { - smu.RLock() - ch = syncC - smu.RUnlock() - return - } rh := &raftReadyHandler{ getLead: func() (lead uint64) { return s.getLead() }, updateLead: func(lead uint64) { s.setLead(lead) }, @@ -1075,7 +779,6 @@ func (s *EtcdServer) run() { if s.compactor != nil { s.compactor.Pause() } - setSyncC(nil) } else { if newLeader { t := time.Now() @@ -1083,17 +786,12 @@ func (s *EtcdServer) run() { s.leadElectedTime = t s.leadTimeMu.Unlock() } - setSyncC(s.SyncTicker.C) if s.compactor != nil { s.compactor.Resume() } } if newLeader { - s.leaderChangedMu.Lock() - lc := s.leaderChanged - s.leaderChanged = make(chan struct{}) - close(lc) - s.leaderChangedMu.Unlock() + s.leaderChanged.Notify() } // TODO: remove the nil checking // current test utility does not provide the stats @@ -1124,7 +822,7 @@ func (s *EtcdServer) run() { s.cancel() sched.Stop() - // wait for gouroutines before closing raft so wal stays open + // wait for goroutines before closing raft so wal stays open s.wg.Wait() s.SyncTicker.Stop() @@ -1146,7 +844,7 @@ func (s *EtcdServer) run() { for { select { case ap := <-s.r.apply(): - f := func(context.Context) { s.applyAll(&ep, &ap) } + f := schedule.NewJob("server_applyAll", func(context.Context) { s.applyAll(&ep, &ap) }) sched.Schedule(f) case leases := <-expiredLeaseC: s.revokeExpiredLeases(leases) @@ -1154,10 +852,6 @@ func (s *EtcdServer) run() { lg.Warn("server error", zap.Error(err)) lg.Warn("data-dir used by this member must be removed") return - case <-getSyncC(): - if s.v2store.HasTTLKeys() { - s.sync(s.Cfg.ReqTimeout()) - } case <-s.stop: return } @@ -1175,7 +869,7 @@ func (s *EtcdServer) revokeExpiredLeases(leases []*lease.Lease) { lg := s.Logger() if !s.ensureLeadership() { lg.Warn("Ignore the lease revoking request because current member isn't a leader", - zap.Uint64("local-member-id", uint64(s.ID()))) + zap.Uint64("local-member-id", uint64(s.MemberID()))) return } @@ -1239,11 +933,11 @@ func (s *EtcdServer) ensureLeadership() bool { return false } - 
newLeaderId := s.raftStatus().Lead - if newLeaderId != uint64(s.ID()) { + newLeaderID := s.raftStatus().Lead + if newLeaderID != uint64(s.MemberID()) { lg.Warn("Current member isn't a leader", - zap.Uint64("local-member-id", uint64(s.ID())), - zap.Uint64("new-lead", newLeaderId)) + zap.Uint64("local-member-id", uint64(s.MemberID())), + zap.Uint64("new-lead", newLeaderID)) return false } @@ -1272,16 +966,17 @@ func (s *EtcdServer) Cleanup() { } } -func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { +func (s *EtcdServer) applyAll(ep *etcdProgress, apply *toApply) { s.applySnapshot(ep, apply) s.applyEntries(ep, apply) + backend.VerifyBackendConsistency(s.Backend(), s.Logger(), true, schema.AllBuckets...) proposalsApplied.Set(float64(ep.appliedi)) s.applyWait.Trigger(ep.appliedi) // wait for the raft routine to finish the disk writes before triggering a // snapshot. or applied index might be greater than the last index in raft - // storage, since the raft routine might be slower than apply routine. + // storage, since the raft routine might be slower than toApply routine. <-apply.notifyc s.triggerSnapshot(ep) @@ -1294,8 +989,8 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { } } -func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { - if raft.IsEmptySnap(apply.snapshot) { +func (s *EtcdServer) applySnapshot(ep *etcdProgress, toApply *toApply) { + if raft.IsEmptySnap(toApply.snapshot) { return } applySnapshotInProgress.Inc() @@ -1305,34 +1000,35 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { "applying snapshot", zap.Uint64("current-snapshot-index", ep.snapi), zap.Uint64("current-applied-index", ep.appliedi), - zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), - zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term), ) defer func() { lg.Info( "applied snapshot", zap.Uint64("current-snapshot-index", ep.snapi), zap.Uint64("current-applied-index", ep.appliedi), - zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), - zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term), ) applySnapshotInProgress.Dec() }() - if apply.snapshot.Metadata.Index <= ep.appliedi { + if toApply.snapshot.Metadata.Index <= ep.appliedi { lg.Panic( "unexpected leader snapshot from outdated index", zap.Uint64("current-snapshot-index", ep.snapi), zap.Uint64("current-applied-index", ep.appliedi), - zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), - zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term), ) } // wait for raftNode to persist snapshot onto the disk - <-apply.notifyc + <-toApply.notifyc - newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot, s.beHooks) + // gofail: var applyBeforeOpenSnapshot struct{} + newbe, err := serverstorage.OpenSnapshotBackend(s.Cfg, s.snapshotter, toApply.snapshot, s.beHooks) if err != nil { lg.Panic("failed to open snapshot backend", zap.Error(err)) } @@ -1343,6 +1039,7 @@ func (s *EtcdServer) 
applySnapshot(ep *etcdProgress, apply *apply) { // Eventually the new consistent_index value coming from snapshot is overwritten // by the old value. s.consistIndex.SetBackend(newbe) + verifySnapshotIndex(toApply.snapshot, s.consistIndex.ConsistentIndex()) // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. @@ -1361,6 +1058,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { } newbe.SetTxPostLockInsideApplyHook(s.getTxPostLockInsideApplyHook()) + lg.Info("restored mvcc store", zap.Uint64("consistent-index", s.consistIndex.ConsistentIndex())) // Closing old backend might block until all the txns @@ -1392,23 +1090,23 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { if s.authStore != nil { lg.Info("restoring auth store") - s.authStore.Recover(newbe) + s.authStore.Recover(schema.NewAuthBackend(lg, newbe)) lg.Info("restored auth store") } lg.Info("restoring v2 store") - if err := s.v2store.Recovery(apply.snapshot.Data); err != nil { + if err := s.v2store.Recovery(toApply.snapshot.Data); err != nil { lg.Panic("failed to restore v2 store", zap.Error(err)) } - if err := assertNoV2StoreContent(lg, s.v2store, s.Cfg.V2Deprecation); err != nil { + if err := serverstorage.AssertNoV2StoreContent(lg, s.v2store, s.Cfg.V2Deprecation); err != nil { lg.Panic("illegal v2store content", zap.Error(err)) } lg.Info("restored v2 store") - s.cluster.SetBackend(newbe) + s.cluster.SetBackend(schema.NewMembershipBackend(lg, newbe)) lg.Info("restoring cluster configuration") @@ -1424,7 +1122,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { lg.Info("adding peers from new cluster configuration") for _, m := range s.cluster.Members() { - if m.ID == s.ID() { + if m.ID == s.MemberID() { continue } s.r.transport.AddPeer(m.ID, m.PeerURLs) @@ -1432,13 +1130,38 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { lg.Info("added peers from new cluster configuration") - ep.appliedt = apply.snapshot.Metadata.Term - ep.appliedi = apply.snapshot.Metadata.Index + ep.appliedt = toApply.snapshot.Metadata.Term + ep.appliedi = toApply.snapshot.Metadata.Index ep.snapi = ep.appliedi - ep.confState = apply.snapshot.Metadata.ConfState + ep.confState = toApply.snapshot.Metadata.ConfState + + // As backends and implementations like alarmsStore changed, we need + // to re-bootstrap Appliers. 
+ s.uberApply = s.NewUberApplier() +} + +func (s *EtcdServer) NewUberApplier() apply.UberApplier { + return apply.NewUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore, s.lessor, s.cluster, s, s, s.consistIndex, + s.Cfg.WarningApplyDuration, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.Cfg.QuotaBackendBytes) } -func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { +func verifySnapshotIndex(snapshot raftpb.Snapshot, cindex uint64) { + verify.Verify(func() { + if cindex != snapshot.Metadata.Index { + panic(fmt.Sprintf("consistent_index(%d) isn't equal to snapshot index (%d)", cindex, snapshot.Metadata.Index)) + } + }) +} + +func verifyConsistentIndexIsLatest(lg *zap.Logger, snapshot raftpb.Snapshot, cindex uint64) { + verify.Verify(func() { + if cindex < snapshot.Metadata.Index { + lg.Panic(fmt.Sprintf("consistent_index(%d) is older than snapshot index (%d)", cindex, snapshot.Metadata.Index)) + } + }) +} + +func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *toApply) { if len(apply.entries) == 0 { return } @@ -1459,41 +1182,52 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { return } var shouldstop bool - if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop { + if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState, apply.raftAdvancedC); shouldstop { go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster")) } } +func (s *EtcdServer) ForceSnapshot() { + s.forceSnapshot = true +} + func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { - if ep.appliedi-ep.snapi <= s.Cfg.SnapshotCount { + if !s.shouldSnapshot(ep) { return } - lg := s.Logger() lg.Info( "triggering snapshot", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.Uint64("local-member-applied-index", ep.appliedi), zap.Uint64("local-member-snapshot-index", ep.snapi), zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount), + zap.Bool("snapshot-forced", s.forceSnapshot), ) + s.forceSnapshot = false s.snapshot(ep.appliedi, ep.confState) + s.compactRaftLog(ep.appliedi) ep.snapi = ep.appliedi } +func (s *EtcdServer) shouldSnapshot(ep *etcdProgress) bool { + return (s.forceSnapshot && ep.appliedi != ep.snapi) || (ep.appliedi-ep.snapi > s.Cfg.SnapshotCount) +} + func (s *EtcdServer) hasMultipleVotingMembers() bool { return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1 } func (s *EtcdServer) isLeader() bool { - return uint64(s.ID()) == s.Lead() + return uint64(s.MemberID()) == s.Lead() } // MoveLeader transfers the leader to the given transferee. 
func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error { - if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner { - return ErrBadLeaderTransferee + member := s.cluster.Member(types.ID(transferee)) + if member == nil || member.IsLearner { + return errors.ErrBadLeaderTransferee } now := time.Now() @@ -1502,7 +1236,7 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er lg := s.Logger() lg.Info( "leadership transfer starting", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("current-leader-member-id", types.ID(lead).String()), zap.String("transferee-member-id", types.ID(transferee).String()), ) @@ -1511,7 +1245,7 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er for s.Lead() != transferee { select { case <-ctx.Done(): // time out - return ErrTimeoutLeaderTransfer + return errors.ErrTimeoutLeaderTransfer case <-time.After(interval): } } @@ -1519,7 +1253,7 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er // TODO: drain all requests, or drop all messages to the old leader lg.Info( "leadership transfer finished", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("old-leader-member-id", types.ID(lead).String()), zap.String("new-leader-member-id", types.ID(transferee).String()), zap.Duration("took", time.Since(now)), @@ -1527,13 +1261,13 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er return nil } -// TransferLeadership transfers the leader to the chosen transferee. -func (s *EtcdServer) TransferLeadership() error { +// TryTransferLeadershipOnShutdown transfers the leader to the chosen transferee. It is only used in server graceful shutdown. +func (s *EtcdServer) TryTransferLeadershipOnShutdown() error { lg := s.Logger() if !s.isLeader() { lg.Info( "skipped leadership transfer; local server is not leader", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("current-leader-member-id", types.ID(s.Lead()).String()), ) return nil @@ -1542,7 +1276,7 @@ func (s *EtcdServer) TransferLeadership() error { if !s.hasMultipleVotingMembers() { lg.Info( "skipped leadership transfer for single voting member cluster", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("current-leader-member-id", types.ID(s.Lead()).String()), ) return nil @@ -1550,7 +1284,7 @@ func (s *EtcdServer) TransferLeadership() error { transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs()) if !ok { - return ErrUnhealthy + return errors.ErrUnhealthy } tm := s.Cfg.ReqTimeout() @@ -1578,8 +1312,8 @@ func (s *EtcdServer) HardStop() { // Do and Process cannot be called after Stop has been invoked. 
func (s *EtcdServer) Stop() { lg := s.Logger() - if err := s.TransferLeadership(); err != nil { - lg.Warn("leadership transfer failed", zap.String("local-member-id", s.ID().String()), zap.Error(err)) + if err := s.TryTransferLeadershipOnShutdown(); err != nil { + lg.Warn("leadership transfer failed", zap.String("local-member-id", s.MemberID().String()), zap.Error(err)) } s.HardStop() } @@ -1599,26 +1333,14 @@ func (s *EtcdServer) stopWithDelay(d time.Duration, err error) { } } -// StopNotify returns a channel that receives a empty struct +// StopNotify returns a channel that receives an empty struct // when the server is stopped. func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } -// StoppingNotify returns a channel that receives a empty struct +// StoppingNotify returns a channel that receives an empty struct // when the server is being stopped. func (s *EtcdServer) StoppingNotify() <-chan struct{} { return s.stopping } -func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() } - -func (s *EtcdServer) LeaderStats() []byte { - lead := s.getLead() - if lead != uint64(s.id) { - return nil - } - return s.lstats.JSON() -} - -func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() } - func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { if s.authStore == nil { // In the context of ordinary etcd process, s.authStore will never be nil. @@ -1628,7 +1350,7 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err // Note that this permission check is done in the API layer, // so TOCTOU problem can be caused potentially in a schedule like this: - // update membership with user A -> revoke root role of A -> apply membership change + // update membership with user A -> revoke root role of A -> toApply membership change // in the state machine layer // However, both of membership change and role management requires the root privilege. // So careful operation by admins can prevent the problem. 
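Editor's note: StopNotify and StoppingNotify (doc comments corrected above) return plain <-chan struct{} values that are closed at the corresponding lifecycle point, so callers simply select on them. A hypothetical caller sketch — waitForShutdown and its drain callback are illustrative, not part of this change:

package example

import "go.etcd.io/etcd/server/v3/etcdserver"

// waitForShutdown blocks until the server has fully stopped, invoking
// drain as soon as shutdown begins so no new work is handed to the server.
func waitForShutdown(srv *etcdserver.EtcdServer, drain func()) {
	select {
	case <-srv.StoppingNotify(): // shutdown has begun
		drain()
		<-srv.StopNotify() // wait for it to complete
	case <-srv.StopNotify(): // already fully stopped
	}
}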
@@ -1679,21 +1401,21 @@ func (s *EtcdServer) mayAddMember(memb membership.Member) error { if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() { lg.Warn( "rejecting member add request; not enough healthy members", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), - zap.Error(ErrNotEnoughStartedMembers), + zap.Error(errors.ErrNotEnoughStartedMembers), ) - return ErrNotEnoughStartedMembers + return errors.ErrNotEnoughStartedMembers } - if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.VotingMembers()) { + if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberID(), s.cluster.VotingMembers()) { lg.Warn( "rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), - zap.Error(ErrUnhealthy), + zap.Error(errors.ErrUnhealthy), ) - return ErrUnhealthy + return errors.ErrUnhealthy } return nil @@ -1726,7 +1448,7 @@ func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi learnerPromoteSucceed.Inc() return resp, nil } - if err != ErrNotLeader { + if !errorspkg.Is(err, errors.ErrNotLeader) { learnerPromoteFailed.WithLabelValues(err.Error()).Inc() return resp, err } @@ -1745,16 +1467,16 @@ func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi return resp, nil } // If member promotion failed, return early. Otherwise keep retry. - if err == ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner { + if errorspkg.Is(err, errors.ErrLearnerNotReady) || errorspkg.Is(err, membership.ErrIDNotFound) || errorspkg.Is(err, membership.ErrMemberNotLearner) { return nil, err } } } - if cctx.Err() == context.DeadlineExceeded { - return nil, ErrTimeout + if errorspkg.Is(cctx.Err(), context.DeadlineExceeded) { + return nil, errors.ErrTimeout } - return nil, ErrCanceled + return nil, errors.ErrCanceled } // promoteMember checks whether the to-be-promoted learner node is ready before sending the promote @@ -1797,8 +1519,7 @@ func (s *EtcdServer) promoteMember(ctx context.Context, id uint64) ([]*membershi func (s *EtcdServer) mayPromoteMember(id types.ID) error { lg := s.Logger() - err := s.isLearnerReady(uint64(id)) - if err != nil { + if err := s.isLearnerReady(lg, uint64(id)); err != nil { return err } @@ -1808,11 +1529,11 @@ func (s *EtcdServer) mayPromoteMember(id types.ID) error { if !s.cluster.IsReadyToPromoteMember(uint64(id)) { lg.Warn( "rejecting member promote request; not enough healthy members", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("requested-member-remove-id", id.String()), - zap.Error(ErrNotEnoughStartedMembers), + zap.Error(errors.ErrNotEnoughStartedMembers), ) - return ErrNotEnoughStartedMembers + return errors.ErrNotEnoughStartedMembers } return nil @@ -1820,8 +1541,8 @@ func (s *EtcdServer) mayPromoteMember(id types.ID) error { // check whether the learner catches up with leader or not. // Note: it will return nil if member is not found in cluster or if member is not learner. -// These two conditions will be checked before apply phase later. 
-func (s *EtcdServer) isLearnerReady(id uint64) error { +// These two conditions will be checked before toApply phase later. +func (s *EtcdServer) isLearnerReady(lg *zap.Logger, id uint64) error { if err := s.waitAppliedIndex(); err != nil { return err } @@ -1830,7 +1551,7 @@ func (s *EtcdServer) isLearnerReady(id uint64) error { // leader's raftStatus.Progress is not nil if rs.Progress == nil { - return ErrNotLeader + return errors.ErrNotLeader } var learnerMatch uint64 @@ -1852,9 +1573,17 @@ func (s *EtcdServer) isLearnerReady(id uint64) error { } leaderMatch := rs.Progress[leaderID].Match + + learnerReadyPercent := float64(learnerMatch) / float64(leaderMatch) + // the learner's Match not caught up with leader yet - if float64(learnerMatch) < float64(leaderMatch)*readyPercent { - return ErrLearnerNotReady + if learnerReadyPercent < readyPercentThreshold { + lg.Error( + "rejecting promote learner: learner is not ready", + zap.Float64("learner-ready-percent", learnerReadyPercent), + zap.Float64("ready-percent-threshold", readyPercentThreshold), + ) + return errors.ErrLearnerNotReady } return nil @@ -1866,39 +1595,39 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { } lg := s.Logger() - isLearner := s.cluster.IsMemberExist(id) && s.cluster.Member(id).IsLearner + member := s.cluster.Member(id) // no need to check quorum when removing non-voting member - if isLearner { + if member != nil && member.IsLearner { return nil } if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) { lg.Warn( "rejecting member remove request; not enough healthy members", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("requested-member-remove-id", id.String()), - zap.Error(ErrNotEnoughStartedMembers), + zap.Error(errors.ErrNotEnoughStartedMembers), ) - return ErrNotEnoughStartedMembers + return errors.ErrNotEnoughStartedMembers } // downed member is safe to remove since it's not part of the active quorum - if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() { + if t := s.r.transport.ActiveSince(id); id != s.MemberID() && t.IsZero() { return nil } // protect quorum if some members are down m := s.cluster.VotingMembers() - active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m) + active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberID(), m) if (active - 1) < 1+((len(m)-1)/2) { lg.Warn( "rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("requested-member-remove", id.String()), zap.Int("active-peers", active), - zap.Error(ErrUnhealthy), + zap.Error(errors.ErrUnhealthy), ) - return ErrUnhealthy + return errors.ErrUnhealthy } return nil @@ -1954,9 +1683,7 @@ func (s *EtcdServer) getLead() uint64 { } func (s *EtcdServer) LeaderChangedNotify() <-chan struct{} { - s.leaderChangedMu.RLock() - defer s.leaderChangedMu.RUnlock() - return s.leaderChanged + return s.leaderChanged.Receive() } // FirstCommitInTermNotify returns channel that will be unlocked on first @@ -1964,21 +1691,18 @@ func (s *EtcdServer) LeaderChangedNotify() <-chan struct{} { // read-only requests (leader is not able to respond any read-only requests // as long as linearizable semantic is required) func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} { - s.firstCommitInTermMu.RLock() - defer 
s.firstCommitInTermMu.RUnlock() - return s.firstCommitInTermC + return s.firstCommitInTerm.Receive() } -// RaftStatusGetter represents etcd server and Raft progress. -type RaftStatusGetter interface { - ID() types.ID - Leader() types.ID - CommittedIndex() uint64 - AppliedIndex() uint64 - Term() uint64 -} +// MemberId returns the ID of the local member. +// Deprecated: Please use (*EtcdServer) MemberID instead. +// +//revive:disable:var-naming +func (s *EtcdServer) MemberId() types.ID { return s.MemberID() } -func (s *EtcdServer) ID() types.ID { return s.id } +//revive:enable:var-naming + +func (s *EtcdServer) MemberID() types.ID { return s.memberID } func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) } @@ -1991,8 +1715,9 @@ func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() } func (s *EtcdServer) Term() uint64 { return s.getTerm() } type confChangeResponse struct { - membs []*membership.Member - err error + membs []*membership.Member + raftAdvanceC <-chan struct{} + err error } // configure sends a configuration change through consensus and @@ -2015,9 +1740,14 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*me lg.Panic("failed to configure") } resp := x.(*confChangeResponse) + // etcdserver need to ensure the raft has already been notified + // or advanced before it responds to the client. Otherwise, the + // following config change request may be rejected. + // See https://github.com/etcd-io/etcd/issues/15528. + <-resp.raftAdvanceC lg.Info( "applied a configuration change through raft", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("raft-conf-change", cc.Type.String()), zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()), ) @@ -2028,29 +1758,10 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*me return nil, s.parseProposeCtxErr(ctx.Err(), start) case <-s.stopping: - return nil, ErrStopped + return nil, errors.ErrStopped } } -// sync proposes a SYNC request and is non-blocking. -// This makes no guarantee that the request will be proposed or performed. -// The request will be canceled after the given timeout. -func (s *EtcdServer) sync(timeout time.Duration) { - req := pb.Request{ - Method: "SYNC", - ID: s.reqIDGen.Next(), - Time: time.Now().UnixNano(), - } - data := pbutil.MustMarshal(&req) - // There is no promise that node has leader when do SYNC request, - // so it uses goroutine to propose. - ctx, cancel := context.WithTimeout(s.ctx, timeout) - s.GoAttach(func() { - s.r.Propose(ctx, data) - cancel() - }) -} - // publishV3 registers server information into the cluster using v3 request. The // information is the JSON representation of this server's member struct, updated // with the static clientURLs of the server. @@ -2058,7 +1769,7 @@ func (s *EtcdServer) sync(timeout time.Duration) { // or its server is stopped. 
func (s *EtcdServer) publishV3(timeout time.Duration) { req := &membershippb.ClusterMemberAttrSetRequest{ - Member_ID: uint64(s.id), + Member_ID: uint64(s.MemberID()), MemberAttributes: &membershippb.Attributes{ Name: s.attributes.Name, ClientUrls: s.attributes.ClientURLs, @@ -2070,7 +1781,7 @@ func (s *EtcdServer) publishV3(timeout time.Duration) { case <-s.stopping: lg.Warn( "stopped publish because server is stopping", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), zap.Duration("publish-timeout", timeout), ) @@ -2087,82 +1798,18 @@ func (s *EtcdServer) publishV3(timeout time.Duration) { close(s.readych) lg.Info( "published local member to cluster through raft", - zap.String("local-member-id", s.ID().String()), - zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.String("cluster-id", s.cluster.ID().String()), - zap.Duration("publish-timeout", timeout), - ) - return - - default: - lg.Warn( - "failed to publish local member to cluster through raft", - zap.String("local-member-id", s.ID().String()), - zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.Duration("publish-timeout", timeout), - zap.Error(err), - ) - } - } -} - -// publish registers server information into the cluster. The information -// is the JSON representation of this server's member struct, updated with the -// static clientURLs of the server. -// The function keeps attempting to register until it succeeds, -// or its server is stopped. -// -// Use v2 store to encode member attributes, and apply through Raft -// but does not go through v2 API endpoint, which means even with v2 -// client handler disabled (e.g. 
--enable-v2=false), cluster can still -// process publish requests through rafthttp -// TODO: Remove in 3.6 (start using publishV3) -func (s *EtcdServer) publish(timeout time.Duration) { - lg := s.Logger() - b, err := json.Marshal(s.attributes) - if err != nil { - lg.Panic("failed to marshal JSON", zap.Error(err)) - return - } - req := pb.Request{ - Method: "PUT", - Path: membership.MemberAttributesStorePath(s.id), - Val: string(b), - } - - for { - ctx, cancel := context.WithTimeout(s.ctx, timeout) - _, err := s.Do(ctx, req) - cancel() - switch err { - case nil: - close(s.readych) - lg.Info( - "published local member to cluster through raft", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.String("request-path", req.Path), zap.String("cluster-id", s.cluster.ID().String()), zap.Duration("publish-timeout", timeout), ) return - case ErrStopped: - lg.Warn( - "stopped publish because server is stopped", - zap.String("local-member-id", s.ID().String()), - zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.Duration("publish-timeout", timeout), - zap.Error(err), - ) - return - default: lg.Warn( "failed to publish local member to cluster through raft", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.String("request-path", req.Path), zap.Duration("publish-timeout", timeout), zap.Error(err), ) @@ -2175,7 +1822,7 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) { lg := s.Logger() fields := []zap.Field{ - zap.String("from", s.ID().String()), + zap.String("from", s.MemberID().String()), zap.String("to", types.ID(merged.To).String()), zap.Int64("bytes", merged.TotalSize), zap.String("size", humanize.Bytes(uint64(merged.TotalSize))), @@ -2210,45 +1857,48 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) { }) } -// apply takes entries received from Raft (after it has been committed) and +// toApply takes entries received from Raft (after it has been committed) and // applies them to the current state of the EtcdServer. // The given entries should not be empty. func (s *EtcdServer) apply( es []raftpb.Entry, confState *raftpb.ConfState, + raftAdvancedC <-chan struct{}, ) (appliedt uint64, appliedi uint64, shouldStop bool) { s.lg.Debug("Applying entries", zap.Int("num-entries", len(es))) for i := range es { e := es[i] + index := s.consistIndex.ConsistentIndex() s.lg.Debug("Applying entry", - zap.Uint64("index", e.Index), - zap.Uint64("term", e.Term), - zap.Stringer("type", e.Type)) + zap.Uint64("consistent-index", index), + zap.Uint64("entry-index", e.Index), + zap.Uint64("entry-term", e.Term), + zap.Stringer("entry-type", e.Type)) + + // We need to toApply all WAL entries on top of v2store + // and only 'unapplied' (e.Index>backend.ConsistentIndex) on the backend. 
+ shouldApplyV3 := membership.ApplyV2storeOnly + if e.Index > index { + shouldApplyV3 = membership.ApplyBoth + // set the consistent index of current executing entry + s.consistIndex.SetConsistentApplyingIndex(e.Index, e.Term) + } switch e.Type { case raftpb.EntryNormal: // gofail: var beforeApplyOneEntryNormal struct{} - s.applyEntryNormal(&e) + s.applyEntryNormal(&e, shouldApplyV3) s.setAppliedIndex(e.Index) s.setTerm(e.Term) case raftpb.EntryConfChange: - // We need to apply all WAL entries on top of v2store - // and only 'unapplied' (e.Index>backend.ConsistentIndex) on the backend. - shouldApplyV3 := membership.ApplyV2storeOnly - - // set the consistent index of current executing entry - if e.Index > s.consistIndex.ConsistentIndex() { - s.consistIndex.SetConsistentApplyingIndex(e.Index, e.Term) - shouldApplyV3 = membership.ApplyBoth - } - + // gofail: var beforeApplyOneConfChange struct{} var cc raftpb.ConfChange pbutil.MustUnmarshal(&cc, e.Data) removedSelf, err := s.applyConfChange(cc, confState, shouldApplyV3) s.setAppliedIndex(e.Index) s.setTerm(e.Term) shouldStop = shouldStop || removedSelf - s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) + s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), raftAdvancedC, err}) default: lg := s.Logger() @@ -2262,15 +1912,10 @@ func (s *EtcdServer) apply( return appliedt, appliedi, shouldStop } -// applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer -func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { - shouldApplyV3 := membership.ApplyV2storeOnly - var ar *applyResult - index := s.consistIndex.ConsistentIndex() - if e.Index > index { - // set the consistent index of current executing entry - s.consistIndex.SetConsistentApplyingIndex(e.Index, e.Term) - shouldApplyV3 = membership.ApplyBoth +// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer +func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry, shouldApplyV3 membership.ShouldApplyV3) { + var ar *apply.Result + if shouldApplyV3 { defer func() { // The txPostLockInsideApplyHook will not get called in some cases, // in which we should move the consistent index forward directly. @@ -2280,15 +1925,11 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { } }() } - s.lg.Debug("apply entry normal", - zap.Uint64("consistent-index", index), - zap.Uint64("entry-index", e.Index), - zap.Bool("should-applyV3", bool(shouldApplyV3))) // raft state machine may generate noop entry when leader confirmation. // skip it in advance to avoid some potential bug in the future if len(e.Data) == 0 { - s.notifyAboutFirstCommitInTerm() + s.firstCommitInTerm.Notify() // promote lessor when the local member is leader and finished // applying all entries from the last term. 
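Editor's note: the hunk above hoists the consistent-index comparison out of the per-entry-type branches: every WAL entry still replays onto the v2store, but only entries strictly newer than the backend's persisted consistent index reach the v3 backend, which is what keeps a restart from double-applying into bbolt. Condensed into a standalone function (illustrative only, reusing the membership names from the diff):

package example

import "go.etcd.io/etcd/server/v3/etcdserver/api/membership"

// decideShouldApply restates the gating rule: apply everything to the
// v2store, but only 'unapplied' entries (entry index > persisted
// consistent index) to the v3 backend.
func decideShouldApply(entryIndex, consistentIndex uint64) membership.ShouldApplyV3 {
	if entryIndex > consistentIndex {
		return membership.ApplyBoth
	}
	return membership.ApplyV2storeOnly
}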
@@ -2304,19 +1945,24 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { rp := &r pbutil.MustUnmarshal(rp, e.Data) s.lg.Debug("applyEntryNormal", zap.Stringer("V2request", rp)) - s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp), shouldApplyV3)) + s.applyV2Request((*RequestV2)(rp), shouldApplyV3) + s.w.Trigger(r.ID, Response{}) return } s.lg.Debug("applyEntryNormal", zap.Stringer("raftReq", &raftReq)) if raftReq.V2 != nil { req := (*RequestV2)(raftReq.V2) - s.w.Trigger(req.ID, s.applyV2Request(req, shouldApplyV3)) + s.applyV2Request(req, shouldApplyV3) + s.w.Trigger(req.ID, Response{}) return } id := raftReq.ID if id == 0 { + if raftReq.Header == nil { + s.lg.Panic("applyEntryNormal, could not find a header") + } id = raftReq.Header.ID } @@ -2325,10 +1971,10 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { if !needResult && raftReq.Txn != nil { removeNeedlessRangeReqs(raftReq.Txn) } - ar = s.applyV3.Apply(&raftReq, shouldApplyV3) + ar = s.applyInternalRaftRequest(&raftReq, shouldApplyV3) } - // do not re-apply applied entries. + // do not re-toApply applied entries. if !shouldApplyV3 { return } @@ -2337,7 +1983,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { return } - if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { + if !errorspkg.Is(ar.Err, errors.ErrNoSpace) || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { s.w.Trigger(id, ar) return } @@ -2347,12 +1993,12 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { "message exceeded backend quota; raising alarm", zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), - zap.Error(ar.err), + zap.Error(ar.Err), ) s.GoAttach(func() { a := &pb.AlarmRequest{ - MemberID: uint64(s.ID()), + MemberID: uint64(s.MemberID()), Action: pb.AlarmRequest_ACTIVATE, Alarm: pb.AlarmType_NOSPACE, } @@ -2361,13 +2007,57 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { }) } -func (s *EtcdServer) notifyAboutFirstCommitInTerm() { - newNotifier := make(chan struct{}) - s.firstCommitInTermMu.Lock() - notifierToClose := s.firstCommitInTermC - s.firstCommitInTermC = newNotifier - s.firstCommitInTermMu.Unlock() - close(notifierToClose) +func (s *EtcdServer) applyInternalRaftRequest(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *apply.Result { + if r.ClusterVersionSet == nil && r.ClusterMemberAttrSet == nil && r.DowngradeInfoSet == nil { + if !shouldApplyV3 { + return nil + } + return s.uberApply.Apply(r) + } + membershipApplier := apply.NewApplierMembership(s.lg, s.cluster, s) + op := "unknown" + defer func(start time.Time) { + txn.ApplySecObserve("v3", op, true, time.Since(start)) + txn.WarnOfExpensiveRequest(s.lg, s.Cfg.WarningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, nil, nil) + }(time.Now()) + switch { + case r.ClusterVersionSet != nil: + op = "ClusterVersionSet" // Implemented in 3.5.x + membershipApplier.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3) + return &apply.Result{} + case r.ClusterMemberAttrSet != nil: + op = "ClusterMemberAttrSet" // Implemented in 3.5.x + membershipApplier.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3) + case r.DowngradeInfoSet != nil: + op = "DowngradeInfoSet" // Implemented in 3.5.x + membershipApplier.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3) + default: + s.lg.Panic("not implemented apply", zap.Stringer("raft-request", r)) + return nil + } + return &apply.Result{} +} + +func noSideEffect(r 
*pb.InternalRaftRequest) bool { + return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil +} + +func removeNeedlessRangeReqs(txn *pb.TxnRequest) { + f := func(ops []*pb.RequestOp) []*pb.RequestOp { + j := 0 + for i := 0; i < len(ops); i++ { + if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok { + continue + } + ops[j] = ops[i] + j++ + } + + return ops[:j] + } + + txn.Success = f(txn.Success) + txn.Failure = f(txn.Failure) } // applyConfChange applies a ConfChange to the server. It is only @@ -2408,24 +2098,15 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con } else { s.cluster.AddMember(&confChangeContext.Member, shouldApplyV3) - if confChangeContext.Member.ID != s.id { + if confChangeContext.Member.ID != s.MemberID() { s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs) } } - // update the isLearner metric when this server id is equal to the id in raft member confChange - if confChangeContext.Member.ID == s.id { - if cc.Type == raftpb.ConfChangeAddLearnerNode { - isLearner.Set(1) - } else { - isLearner.Set(0) - } - } - case raftpb.ConfChangeRemoveNode: id := types.ID(cc.NodeID) s.cluster.RemoveMember(id, shouldApplyV3) - if id == s.id { + if id == s.MemberID() { return true, nil } s.r.transport.RemovePeer(id) @@ -2443,7 +2124,7 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con ) } s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes, shouldApplyV3) - if m.ID != s.id { + if m.ID != s.MemberID() { s.r.transport.UpdatePeer(m.ID, m.PeerURLs) } } @@ -2452,7 +2133,7 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con // TODO: non-blocking snapshot func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { - clone := s.v2store.Clone() + d := GetMembershipInfoInV2Format(s.Logger(), s.cluster) // commit kv to write metadata (for example: consistent index) to disk. // // This guarantees that Backend's consistent_index is >= index of last snapshot. @@ -2460,71 +2141,71 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { // KV().commit() updates the consistent index in backend. // All operations that update consistent index must be called sequentially // from applyAll function. - // So KV().Commit() cannot run in parallel with apply. It has to be called outside + // So KV().Commit() cannot run in parallel with toApply. It has to be called outside // the go routine created below. s.KV().Commit() - s.GoAttach(func() { - lg := s.Logger() + lg := s.Logger() - d, err := clone.SaveNoCopy() - // TODO: current store will never fail to do a snapshot - // what should we do if the store might fail? - if err != nil { - lg.Panic("failed to save v2 store", zap.Error(err)) - } - snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) - if err != nil { - // the snapshot was done asynchronously with the progress of raft. - // raft might have already got a newer snapshot. - if err == raft.ErrSnapOutOfDate { - return - } - lg.Panic("failed to create snapshot", zap.Error(err)) - } - // SaveSnap saves the snapshot to file and appends the corresponding WAL entry. - if err = s.r.storage.SaveSnap(snap); err != nil { - lg.Panic("failed to save snapshot", zap.Error(err)) - } - if err = s.r.storage.Release(snap); err != nil { - lg.Panic("failed to release wal", zap.Error(err)) + // For backward compatibility, generate v2 snapshot from v3 state. 
+ snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) + if err != nil { + // the snapshot was done asynchronously with the progress of raft. + // raft might have already got a newer snapshot. + if errorspkg.Is(err, raft.ErrSnapOutOfDate) { + return } + lg.Panic("failed to create snapshot", zap.Error(err)) + } - lg.Info( - "saved snapshot", - zap.Uint64("snapshot-index", snap.Metadata.Index), - ) + verifyConsistentIndexIsLatest(lg, snap, s.consistIndex.ConsistentIndex()) - // When sending a snapshot, etcd will pause compaction. - // After receives a snapshot, the slow follower needs to get all the entries right after - // the snapshot sent to catch up. If we do not pause compaction, the log entries right after - // the snapshot sent might already be compacted. It happens when the snapshot takes long time - // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. - if atomic.LoadInt64(&s.inflightSnapshots) != 0 { - lg.Info("skip compaction since there is an inflight snapshot") - return - } + // SaveSnap saves the snapshot to file and appends the corresponding WAL entry. + if err = s.r.storage.SaveSnap(snap); err != nil { + lg.Panic("failed to save snapshot", zap.Error(err)) + } + if err = s.r.storage.Release(snap); err != nil { + lg.Panic("failed to release wal", zap.Error(err)) + } - // keep some in memory log entries for slow followers. - compacti := uint64(1) - if snapi > s.Cfg.SnapshotCatchUpEntries { - compacti = snapi - s.Cfg.SnapshotCatchUpEntries - } + lg.Info( + "saved snapshot", + zap.Uint64("snapshot-index", snap.Metadata.Index), + ) +} - err = s.r.raftStorage.Compact(compacti) - if err != nil { - // the compaction was done asynchronously with the progress of raft. - // raft log might already been compact. - if err == raft.ErrCompacted { - return - } - lg.Panic("failed to compact", zap.Error(err)) +func (s *EtcdServer) compactRaftLog(snapi uint64) { + lg := s.Logger() + + // When sending a snapshot, etcd will pause compaction. + // After receives a snapshot, the slow follower needs to get all the entries right after + // the snapshot sent to catch up. If we do not pause compaction, the log entries right after + // the snapshot sent might already be compacted. It happens when the snapshot takes long time + // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. + if atomic.LoadInt64(&s.inflightSnapshots) != 0 { + lg.Info("skip compaction since there is an inflight snapshot") + return + } + + // keep some in memory log entries for slow followers. + compacti := uint64(1) + if snapi > s.Cfg.SnapshotCatchUpEntries { + compacti = snapi - s.Cfg.SnapshotCatchUpEntries + } + + err := s.r.raftStorage.Compact(compacti) + if err != nil { + // the compaction was done asynchronously with the progress of raft. + // raft log might already been compact. + if errorspkg.Is(err, raft.ErrCompacted) { + return } - lg.Info( - "compacted Raft logs", - zap.Uint64("compact-index", compacti), - ) - }) + lg.Panic("failed to compact", zap.Error(err)) + } + lg.Info( + "compacted Raft logs", + zap.Uint64("compact-index", compacti), + ) } // CutPeer drops messages to the specified peer. @@ -2554,48 +2235,55 @@ func (s *EtcdServer) ClusterVersion() *semver.Version { return s.cluster.Version() } -// monitorVersions checks the member's version every monitorVersionInterval. -// It updates the cluster version if all members agrees on a higher one. -// It prints out log if there is a member with a higher version than the -// local version. 
-// TODO switch to updateClusterVersionV3 in 3.6 -func (s *EtcdServer) monitorVersions() { +func (s *EtcdServer) StorageVersion() *semver.Version { + // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock. + s.bemu.RLock() + defer s.bemu.RUnlock() + + v, err := schema.DetectSchemaVersion(s.lg, s.be.ReadTx()) + if err != nil { + s.lg.Warn("Failed to detect schema version", zap.Error(err)) + return nil + } + return &v +} + +// monitorClusterVersions every monitorVersionInterval checks if it's the leader and updates cluster version if needed. +func (s *EtcdServer) monitorClusterVersions() { + lg := s.Logger() + monitor := serverversion.NewMonitor(lg, NewServerVersionAdapter(s)) for { select { - case <-s.FirstCommitInTermNotify(): + case <-s.firstCommitInTerm.Receive(): case <-time.After(monitorVersionInterval): case <-s.stopping: + lg.Info("server has stopped; stopping cluster version's monitor") return } - if s.Leader() != s.ID() { + if s.Leader() != s.MemberID() { continue } - - v := decideClusterVersion(s.Logger(), getVersions(s.Logger(), s.cluster, s.id, s.peerRt)) - if v != nil { - // only keep major.minor version for comparison - v = &semver.Version{ - Major: v.Major, - Minor: v.Minor, - } - } - - // if the current version is nil: - // 1. use the decided version if possible - // 2. or use the min cluster version - if s.cluster.Version() == nil { - verStr := version.MinClusterVersion - if v != nil { - verStr = v.String() - } - s.GoAttach(func() { s.updateClusterVersionV2(verStr) }) - continue + err := monitor.UpdateClusterVersionIfNeeded() + if err != nil { + s.lg.Error("Failed to monitor cluster version", zap.Error(err)) } + } +} - if v != nil && membership.IsValidVersionChange(s.cluster.Version(), v) { - s.GoAttach(func() { s.updateClusterVersionV2(v.String()) }) +// monitorStorageVersion every monitorVersionInterval updates storage version if needed. +func (s *EtcdServer) monitorStorageVersion() { + lg := s.Logger() + monitor := serverversion.NewMonitor(lg, NewServerVersionAdapter(s)) + for { + select { + case <-time.After(monitorVersionInterval): + case <-s.clusterVersionChanged.Receive(): + case <-s.stopping: + lg.Info("server has stopped; stopping storage version's monitor") + return } + monitor.UpdateStorageVersionIfNeeded() } } @@ -2604,19 +2292,23 @@ func (s *EtcdServer) monitorKVHash() { if t == 0 { return } + checkTicker := time.NewTicker(t) + defer checkTicker.Stop() lg := s.Logger() lg.Info( "enabled corruption checking", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberID().String()), zap.Duration("interval", t), ) for { select { case <-s.stopping: + lg.Info("server has stopped; stopping kv hash's monitor") return - case <-time.After(t): + case <-checkTicker.C: } + backend.VerifyBackendConsistency(s.be, lg, false, schema.AllBuckets...) 
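`monitorClusterVersions`, `monitorStorageVersion`, and the reworked `monitorKVHash` all share one loop shape: block on a timer or a notification channel, run one round of work, and exit cleanly on `s.stopping`. A stripped-down sketch of that shape (`runMonitor` and its parameters are illustrative names, not etcd APIs):

```go
package main

import (
	"fmt"
	"time"
)

// runMonitor distills the shape shared by the monitor goroutines above:
// wake on a period or an external nudge, exit cleanly when stopped.
func runMonitor(period time.Duration, nudge, stop <-chan struct{}, work func()) {
	ticker := time.NewTicker(period)
	defer ticker.Stop() // a single reused ticker, as monitorKVHash now does
	for {
		select {
		case <-ticker.C:
		case <-nudge: // e.g. firstCommitInTerm / clusterVersionChanged
		case <-stop:
			return
		}
		work()
	}
}

func main() {
	stop := make(chan struct{})
	go runMonitor(10*time.Millisecond, nil, stop, func() { fmt.Println("tick") })
	time.Sleep(35 * time.Millisecond)
	close(stop)
}
```

Note that the hunk above also switches `monitorKVHash` from `time.After(t)` in a loop to a single reused `time.NewTicker`, which avoids allocating a fresh timer on every iteration.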
if !s.isLeader() { continue } @@ -2635,6 +2327,8 @@ func (s *EtcdServer) monitorCompactHash() { select { case <-time.After(t): case <-s.stopping: + lg := s.Logger() + lg.Info("server has stopped; stopping compact hash's monitor") return } if !s.isLeader() { @@ -2644,46 +2338,6 @@ func (s *EtcdServer) monitorCompactHash() { } } -func (s *EtcdServer) updateClusterVersionV2(ver string) { - lg := s.Logger() - - if s.cluster.Version() == nil { - lg.Info( - "setting up initial cluster version using v2 API", - zap.String("cluster-version", version.Cluster(ver)), - ) - } else { - lg.Info( - "updating cluster version using v2 API", - zap.String("from", version.Cluster(s.cluster.Version().String())), - zap.String("to", version.Cluster(ver)), - ) - } - - req := pb.Request{ - Method: "PUT", - Path: membership.StoreClusterVersionKey(), - Val: ver, - } - - ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) - _, err := s.Do(ctx, req) - cancel() - - switch err { - case nil: - lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver))) - return - - case ErrStopped: - lg.Warn("aborting cluster version update; server is stopped", zap.Error(err)) - return - - default: - lg.Warn("failed to update cluster version", zap.Error(err)) - } -} - func (s *EtcdServer) updateClusterVersionV3(ver string) { lg := s.Logger() @@ -2706,12 +2360,12 @@ func (s *EtcdServer) updateClusterVersionV3(ver string) { _, err := s.raftRequest(ctx, pb.InternalRaftRequest{ClusterVersionSet: &req}) cancel() - switch err { - case nil: + switch { + case errorspkg.Is(err, nil): lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver))) return - case ErrStopped: + case errorspkg.Is(err, errors.ErrStopped): lg.Warn("aborting cluster version update; server is stopped", zap.Error(err)) return @@ -2720,12 +2374,13 @@ func (s *EtcdServer) updateClusterVersionV3(ver string) { } } +// monitorDowngrade every DowngradeCheckTime checks if it's the leader and cancels downgrade if needed. 
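This block (and several below) converts `switch err { case X: }` comparisons into `errors.Is` checks, which matters once sentinel errors are wrapped with `%w`. A minimal demonstration (`errStopped` is a stand-in sentinel, not the real `errors.ErrStopped`):

```go
package main

import (
	"errors"
	"fmt"
)

var errStopped = errors.New("server stopped")

func main() {
	err := fmt.Errorf("proposal failed: %w", errStopped)

	fmt.Println(err == errStopped)          // false: identity compare misses the wrap
	fmt.Println(errors.Is(err, errStopped)) // true: Is unwraps the %w chain
}
```

One oddity worth noting: `case errorspkg.Is(err, nil):` in `updateClusterVersionV3` is exactly equivalent to `err == nil` (the standard library's `errors.Is` short-circuits on a nil target), just spelled through the same helper as the other cases.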
func (s *EtcdServer) monitorDowngrade() { + monitor := serverversion.NewMonitor(s.Logger(), NewServerVersionAdapter(s)) t := s.Cfg.DowngradeCheckTime if t == 0 { return } - lg := s.Logger() for { select { case <-time.After(t): @@ -2736,52 +2391,37 @@ func (s *EtcdServer) monitorDowngrade() { if !s.isLeader() { continue } - - d := s.cluster.DowngradeInfo() - if !d.Enabled { - continue - } - - targetVersion := d.TargetVersion - v := semver.Must(semver.NewVersion(targetVersion)) - if isMatchedVersions(s.Logger(), v, getVersions(s.Logger(), s.cluster, s.id, s.peerRt)) { - lg.Info("the cluster has been downgraded", zap.String("cluster-version", targetVersion)) - ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - if _, err := s.downgradeCancel(ctx); err != nil { - lg.Warn("failed to cancel downgrade", zap.Error(err)) - } - cancel() - } + monitor.CancelDowngradeIfNeeded() } } func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { - switch err { - case context.Canceled: - return ErrCanceled + switch { + case errorspkg.Is(err, context.Canceled): + return errors.ErrCanceled - case context.DeadlineExceeded: + case errorspkg.Is(err, context.DeadlineExceeded): s.leadTimeMu.RLock() curLeadElected := s.leadElectedTime s.leadTimeMu.RUnlock() prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) if start.After(prevLeadLost) && start.Before(curLeadElected) { - return ErrTimeoutDueToLeaderFail + return errors.ErrTimeoutDueToLeaderFail } lead := types.ID(s.getLead()) switch lead { case types.ID(raft.None): // TODO: return error to specify it happens because the cluster does not have leader now - case s.ID(): - if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) { - return ErrTimeoutDueToConnectionLost + case s.MemberID(): + if !isConnectedToQuorumSince(s.r.transport, start, s.MemberID(), s.cluster.Members()) { + return errors.ErrTimeoutDueToConnectionLost } default: if !isConnectedSince(s.r.transport, start, lead) { - return ErrTimeoutDueToConnectionLost + return errors.ErrTimeoutDueToConnectionLost } } - return ErrTimeout + return errors.ErrTimeout default: return err @@ -2790,26 +2430,19 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { func (s *EtcdServer) KV() mvcc.WatchableKV { return s.kv } func (s *EtcdServer) Backend() backend.Backend { - s.bemu.Lock() - defer s.bemu.Unlock() + s.bemu.RLock() + defer s.bemu.RUnlock() return s.be } func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } func (s *EtcdServer) restoreAlarms() error { - s.applyV3 = s.newApplierV3() - as, err := v3alarm.NewAlarmStore(s.lg, s) + as, err := v3alarm.NewAlarmStore(s.lg, schema.NewAlarmBackend(s.lg, s.be)) if err != nil { return err } s.alarmStore = as - if len(as.Get(pb.AlarmType_NOSPACE)) > 0 { - s.applyV3 = newApplierV3Capped(s.applyV3) - } - if len(as.Get(pb.AlarmType_CORRUPT)) > 0 { - s.applyV3 = newApplierV3Corrupt(s.applyV3) - } return nil } @@ -2854,6 +2487,10 @@ func (s *EtcdServer) raftStatus() raft.Status { return s.r.Node.Status() } +func (s *EtcdServer) Version() *serverversion.Manager { + return serverversion.NewManager(s.Logger(), NewServerVersionAdapter(s)) +} + func (s *EtcdServer) getTxPostLockInsideApplyHook() func() { return func() { applyingIdx, applyingTerm := s.consistIndex.ConsistentApplyingIndex() @@ -2863,25 +2500,6 @@ func (s *EtcdServer) getTxPostLockInsideApplyHook() func() { } } -func maybeDefragBackend(cfg 
config.ServerConfig, be backend.Backend) error { - size := be.Size() - sizeInUse := be.SizeInUse() - freeableMemory := uint(size - sizeInUse) - thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024 - if freeableMemory < thresholdBytes { - cfg.Logger.Info("Skipping defragmentation", - zap.Int64("current-db-size-bytes", size), - zap.String("current-db-size", humanize.Bytes(uint64(size))), - zap.Int64("current-db-size-in-use-bytes", sizeInUse), - zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))), - zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes), - zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))), - ) - return nil - } - return be.Defrag() -} - func (s *EtcdServer) CorruptionChecker() CorruptionChecker { return s.corruptionChecker } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go index 72d10c1796d6d..cc3c545bee2a2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go @@ -17,12 +17,12 @@ package etcdserver import ( "io" - "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/mvcc/backend" - humanize "github.com/dustin/go-humanize" "go.uber.org/zap" + + "go.etcd.io/etcd/server/v3/etcdserver/api/snap" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/raft/v3/raftpb" ) // createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf), @@ -31,11 +31,7 @@ import ( func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message { lg := s.Logger() // get a snapshot of v2 store as []byte - clone := s.v2store.Clone() - d, err := clone.SaveNoCopy() - if err != nil { - lg.Panic("failed to save v2 store data", zap.Error(err)) - } + d := GetMembershipInfoInV2Format(lg, s.cluster) // commit kv to write metadata(for example: consistent index). s.KV().Commit() @@ -53,7 +49,9 @@ func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi }, Data: d, } - m.Snapshot = snapshot + m.Snapshot = &snapshot + + verifySnapshotIndex(snapshot, s.consistIndex.ConsistentIndex()) return *snap.NewMessage(m, rc, dbsnap.Size()) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/metrics.go new file mode 100644 index 0000000000000..e66254b1c6088 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/metrics.go @@ -0,0 +1,67 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package txn + +import ( + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + slowApplies = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "slow_apply_total", + Help: "The total number of slow apply requests (likely overloaded from slow disk).", + }) + applySec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "apply_duration_seconds", + Help: "The latency distributions of v2 apply called by backend.", + + // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2 + // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec + Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20), + }, + []string{"version", "op", "success"}) + rangeSec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "range_duration_seconds", + Help: "The latency distributions of txn.Range", + + // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2 + // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec + Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20), + }, + []string{"success"}) +) + +func ApplySecObserve(version, op string, success bool, latency time.Duration) { + applySec.WithLabelValues(version, op, strconv.FormatBool(success)).Observe(float64(latency.Microseconds()) / 1000000.0) +} + +func RangeSecObserve(success bool, latency time.Duration) { + rangeSec.WithLabelValues(strconv.FormatBool(success)).Observe(float64(latency.Microseconds()) / 1000000.0) +} + +func init() { + prometheus.MustRegister(applySec) + prometheus.MustRegister(rangeSec) + prometheus.MustRegister(slowApplies) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/txn.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/txn.go new file mode 100644 index 0000000000000..220b7d315800f --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/txn.go @@ -0,0 +1,723 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
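Before the `txn` package body below, a note on the metrics file just added: `applySec` and `rangeSec` use exponential buckets, so 20 buckets starting at 0.1 ms cover up to 0.0001 × 2^19 ≈ 52.4 s, matching the inline comment. A sketch of defining and feeding such a histogram (the `main` scaffolding is illustrative; the opts mirror the file above):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var applySec = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Namespace: "etcd",
	Subsystem: "server",
	Name:      "apply_duration_seconds",
	Help:      "The latency distributions of v2 apply called by backend.",
	// 20 buckets: 0.0001s, 0.0002s, ..., 0.0001 * 2^19 = 52.4288s
	Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20),
}, []string{"version", "op", "success"})

func main() {
	prometheus.MustRegister(applySec)
	start := time.Now()
	// ... the work being measured ...
	applySec.WithLabelValues("v3", "put", "true").Observe(time.Since(start).Seconds())
	fmt.Println("observed one sample")
}
```

`ApplySecObserve` converts via `latency.Microseconds() / 1000000.0`, which is `latency.Seconds()` truncated to microsecond precision.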
+ +package txn + +import ( + "bytes" + "context" + "fmt" + "sort" + "time" + + "go.uber.org/zap" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/mvccpb" + "go.etcd.io/etcd/pkg/v3/traceutil" + "go.etcd.io/etcd/server/v3/auth" + "go.etcd.io/etcd/server/v3/etcdserver/errors" + "go.etcd.io/etcd/server/v3/lease" + "go.etcd.io/etcd/server/v3/storage/mvcc" +) + +func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { + trace = traceutil.Get(ctx) + // create put tracing if the trace in context is empty + if trace.IsEmpty() { + trace = traceutil.New("put", + lg, + traceutil.Field{Key: "key", Value: string(p.Key)}, + traceutil.Field{Key: "req_size", Value: p.Size()}, + ) + ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace) + } + leaseID := lease.LeaseID(p.Lease) + if leaseID != lease.NoLease { + if l := lessor.Lookup(leaseID); l == nil { + return nil, nil, lease.ErrLeaseNotFound + } + } + txnWrite := kv.Write(trace) + defer txnWrite.End() + resp, err = put(ctx, txnWrite, p) + return resp, trace, err +} + +func put(ctx context.Context, txnWrite mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) { + trace := traceutil.Get(ctx) + resp = &pb.PutResponse{} + resp.Header = &pb.ResponseHeader{} + val, leaseID := p.Value, lease.LeaseID(p.Lease) + + var rr *mvcc.RangeResult + if p.IgnoreValue || p.IgnoreLease || p.PrevKv { + trace.StepWithFunction(func() { + rr, err = txnWrite.Range(context.TODO(), p.Key, nil, mvcc.RangeOptions{}) + }, "get previous kv pair") + + if err != nil { + return nil, err + } + } + if p.IgnoreValue || p.IgnoreLease { + if rr == nil || len(rr.KVs) == 0 { + // ignore_{lease,value} flag expects previous key-value pair + return nil, errors.ErrKeyNotFound + } + } + if p.IgnoreValue { + val = rr.KVs[0].Value + } + if p.IgnoreLease { + leaseID = lease.LeaseID(rr.KVs[0].Lease) + } + if p.PrevKv { + if rr != nil && len(rr.KVs) != 0 { + resp.PrevKv = &rr.KVs[0] + } + } + + resp.Header.Revision = txnWrite.Put(p.Key, val, leaseID) + trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) + return resp, nil +} + +func DeleteRange(ctx context.Context, lg *zap.Logger, kv mvcc.KV, dr *pb.DeleteRangeRequest) (resp *pb.DeleteRangeResponse, trace *traceutil.Trace, err error) { + trace = traceutil.Get(ctx) + // create delete tracing if the trace in context is empty + if trace.IsEmpty() { + trace = traceutil.New("delete_range", + lg, + traceutil.Field{Key: "key", Value: string(dr.Key)}, + traceutil.Field{Key: "range_end", Value: string(dr.RangeEnd)}, + ) + ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace) + } + txnWrite := kv.Write(trace) + defer txnWrite.End() + resp, err = deleteRange(ctx, txnWrite, dr) + return resp, trace, err +} + +func deleteRange(ctx context.Context, txnWrite mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + resp := &pb.DeleteRangeResponse{} + resp.Header = &pb.ResponseHeader{} + end := mkGteRange(dr.RangeEnd) + + if dr.PrevKv { + rr, err := txnWrite.Range(ctx, dr.Key, end, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } + if rr != nil { + resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs)) + for i := range rr.KVs { + resp.PrevKvs[i] = &rr.KVs[i] + } + } + } + + resp.Deleted, resp.Header.Revision = txnWrite.DeleteRange(dr.Key, end) + return resp, nil +} + +func Range(ctx context.Context, lg *zap.Logger, kv mvcc.KV, r *pb.RangeRequest) (resp 
*pb.RangeResponse, trace *traceutil.Trace, err error) { + trace = traceutil.Get(ctx) + if trace.IsEmpty() { + trace = traceutil.New("range", lg) + ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace) + } + defer func(start time.Time) { + success := err == nil + RangeSecObserve(success, time.Since(start)) + }(time.Now()) + txnRead := kv.Read(mvcc.ConcurrentReadTxMode, trace) + defer txnRead.End() + resp, err = executeRange(ctx, lg, txnRead, r) + return resp, trace, err +} + +func executeRange(ctx context.Context, lg *zap.Logger, txnRead mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { + trace := traceutil.Get(ctx) + + resp := &pb.RangeResponse{} + resp.Header = &pb.ResponseHeader{} + + limit := r.Limit + if r.SortOrder != pb.RangeRequest_NONE || + r.MinModRevision != 0 || r.MaxModRevision != 0 || + r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { + // fetch everything; sort and truncate afterwards + limit = 0 + } + if limit > 0 { + // fetch one extra for 'more' flag + limit = limit + 1 + } + + ro := mvcc.RangeOptions{ + Limit: limit, + Rev: r.Revision, + Count: r.CountOnly, + } + + rr, err := txnRead.Range(ctx, r.Key, mkGteRange(r.RangeEnd), ro) + if err != nil { + return nil, err + } + + if r.MaxModRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } + pruneKVs(rr, f) + } + if r.MinModRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision } + pruneKVs(rr, f) + } + if r.MaxCreateRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } + pruneKVs(rr, f) + } + if r.MinCreateRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } + pruneKVs(rr, f) + } + + sortOrder := r.SortOrder + if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE { + // Since current mvcc.Range implementation returns results + // sorted by keys in lexiographically ascending order, + // sort ASCEND by default only when target is not 'KEY' + sortOrder = pb.RangeRequest_ASCEND + } else if r.SortTarget == pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_ASCEND { + // Since current mvcc.Range implementation returns results + // sorted by keys in lexiographically ascending order, + // don't re-sort when target is 'KEY' and order is ASCEND + sortOrder = pb.RangeRequest_NONE + } + if sortOrder != pb.RangeRequest_NONE { + var sorter sort.Interface + switch { + case r.SortTarget == pb.RangeRequest_KEY: + sorter = &kvSortByKey{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_VERSION: + sorter = &kvSortByVersion{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_CREATE: + sorter = &kvSortByCreate{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_MOD: + sorter = &kvSortByMod{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_VALUE: + sorter = &kvSortByValue{&kvSort{rr.KVs}} + default: + lg.Panic("unexpected sort target", zap.Int32("sort-target", int32(r.SortTarget))) + } + switch { + case sortOrder == pb.RangeRequest_ASCEND: + sort.Sort(sorter) + case sortOrder == pb.RangeRequest_DESCEND: + sort.Sort(sort.Reverse(sorter)) + } + } + + if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { + rr.KVs = rr.KVs[:r.Limit] + resp.More = true + } + trace.Step("filter and sort the key-value pairs") + resp.Header.Revision = rr.Rev + resp.Count = int64(rr.Count) + resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs)) + for i := range rr.KVs { + if r.KeysOnly { + rr.KVs[i].Value = nil + } + resp.Kvs[i] = 
&rr.KVs[i] + } + trace.Step("assemble the response") + return resp, nil +} + +func Txn(ctx context.Context, lg *zap.Logger, rt *pb.TxnRequest, txnModeWriteWithSharedBuffer bool, kv mvcc.KV, lessor lease.Lessor) (*pb.TxnResponse, *traceutil.Trace, error) { + trace := traceutil.Get(ctx) + if trace.IsEmpty() { + trace = traceutil.New("transaction", lg) + ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace) + } + isWrite := !IsTxnReadonly(rt) + // When the transaction contains write operations, we use ReadTx instead of + // ConcurrentReadTx to avoid extra overhead of copying buffer. + var mode mvcc.ReadTxMode + if isWrite && txnModeWriteWithSharedBuffer /*a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer*/ { + mode = mvcc.SharedBufReadTxMode + } else { + mode = mvcc.ConcurrentReadTxMode + } + txnRead := kv.Read(mode, trace) + var txnPath []bool + trace.StepWithFunction( + func() { + txnPath = compareToPath(txnRead, rt) + }, + "compare", + ) + if isWrite { + trace.AddField(traceutil.Field{Key: "read_only", Value: false}) + } + _, err := checkTxn(txnRead, rt, lessor, txnPath) + if err != nil { + txnRead.End() + return nil, nil, err + } + trace.Step("check requests") + // When executing mutable txnWrite ops, etcd must hold the txnWrite lock so + // readers do not see any intermediate results. Since writes are + // serialized on the raft loop, the revision in the read view will + // be the revision of the write txnWrite. + var txnWrite mvcc.TxnWrite + if isWrite { + txnRead.End() + txnWrite = kv.Write(trace) + } else { + txnWrite = mvcc.NewReadOnlyTxnWrite(txnRead) + } + txnResp, err := txn(ctx, lg, txnWrite, rt, isWrite, txnPath) + txnWrite.End() + + trace.AddField( + traceutil.Field{Key: "number_of_response", Value: len(txnResp.Responses)}, + traceutil.Field{Key: "response_revision", Value: txnResp.Header.Revision}, + ) + return txnResp, trace, err +} + +func txn(ctx context.Context, lg *zap.Logger, txnWrite mvcc.TxnWrite, rt *pb.TxnRequest, isWrite bool, txnPath []bool) (*pb.TxnResponse, error) { + txnResp, _ := newTxnResp(rt, txnPath) + _, err := executeTxn(ctx, lg, txnWrite, rt, txnPath, txnResp) + if err != nil { + if isWrite { + // CAUTION: When a txn performing write operations starts, we always expect it to be successful. + // If a write failure is seen we SHOULD NOT try to recover the server, but crash with a panic to make the failure explicit. + // Trying to silently recover (e.g by ignoring the failed txn or calling txn.End() early) poses serious risks: + // - violation of transaction atomicity if some write operations have been partially executed + // - data inconsistency across different etcd members if they applied the txn asymmetrically + lg.Panic("unexpected error during txn with writes", zap.Error(err)) + } else { + lg.Error("unexpected error during readonly txn", zap.Error(err)) + } + } + rev := txnWrite.Rev() + if len(txnWrite.Changes()) != 0 { + rev++ + } + txnResp.Header.Revision = rev + return txnResp, err +} + +// newTxnResp allocates a txn response for a txn request given a path. 
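`compareToPath` (two hunks down) flattens a possibly nested txn into a depth-first `[]bool`: one entry per txn, `true` when its guard chose the success branch. `newTxnResp`, whose body follows immediately, walks that same encoding to pre-allocate responses. A toy model of the flattening (`txnNode` and `pathOf` are illustrative stand-ins, not the pb types):

```go
package main

import "fmt"

// txnNode is a toy stand-in for pb.TxnRequest: a guard result plus
// nested transactions under each branch.
type txnNode struct {
	guard            bool // would be applyCompares(rv, rt.Compare)
	success, failure []*txnNode
}

// pathOf mirrors compareToPath: depth-first, one bool per txn,
// recursing only into the branch the guard selects.
func pathOf(t *txnNode) []bool {
	path := []bool{t.guard}
	branch := t.success
	if !t.guard {
		branch = t.failure
	}
	for _, child := range branch {
		path = append(path, pathOf(child)...)
	}
	return path
}

func main() {
	inner := &txnNode{guard: false}
	outer := &txnNode{guard: true, success: []*txnNode{inner}}
	fmt.Println(pathOf(outer)) // [true false]
}
```

Consumers peel the path as they recurse (`txnPath[1:]`, then skipping `txns+1` entries per nested txn), which is why producer and consumers must agree on this depth-first order.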
+func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) { + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + resps := make([]*pb.ResponseOp, len(reqs)) + txnResp = &pb.TxnResponse{ + Responses: resps, + Succeeded: txnPath[0], + Header: &pb.ResponseHeader{}, + } + for i, req := range reqs { + switch tv := req.Request.(type) { + case *pb.RequestOp_RequestRange: + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}} + case *pb.RequestOp_RequestPut: + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}} + case *pb.RequestOp_RequestDeleteRange: + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}} + case *pb.RequestOp_RequestTxn: + resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:]) + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}} + txnPath = txnPath[1+txns:] + txnCount += txns + 1 + default: + } + } + return txnResp, txnCount +} + +func executeTxn(ctx context.Context, lg *zap.Logger, txnWrite mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int, err error) { + trace := traceutil.Get(ctx) + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + + for i, req := range reqs { + respi := tresp.Responses[i].Response + switch tv := req.Request.(type) { + case *pb.RequestOp_RequestRange: + trace.StartSubTrace( + traceutil.Field{Key: "req_type", Value: "range"}, + traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)}, + traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)}) + resp, err := executeRange(ctx, lg, txnWrite, tv.RequestRange) + if err != nil { + return 0, fmt.Errorf("applyTxn: failed Range: %w", err) + } + respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp + trace.StopSubTrace() + case *pb.RequestOp_RequestPut: + trace.StartSubTrace( + traceutil.Field{Key: "req_type", Value: "put"}, + traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)}, + traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()}) + resp, err := put(ctx, txnWrite, tv.RequestPut) + if err != nil { + return 0, fmt.Errorf("applyTxn: failed Put: %w", err) + } + respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp + trace.StopSubTrace() + case *pb.RequestOp_RequestDeleteRange: + resp, err := deleteRange(ctx, txnWrite, tv.RequestDeleteRange) + if err != nil { + return 0, fmt.Errorf("applyTxn: failed DeleteRange: %w", err) + } + respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp + case *pb.RequestOp_RequestTxn: + resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn + applyTxns, err := executeTxn(ctx, lg, txnWrite, tv.RequestTxn, txnPath[1:], resp) + if err != nil { + // don't wrap the error. 
It's a recursive call and err should be already wrapped + return 0, err + } + txns += applyTxns + 1 + txnPath = txnPath[applyTxns+1:] + default: + // empty union + } + } + return txns, nil +} + +func checkPut(rv mvcc.ReadView, lessor lease.Lessor, req *pb.PutRequest) error { + if req.IgnoreValue || req.IgnoreLease { + // expects previous key-value, error if not exist + rr, err := rv.Range(context.TODO(), req.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return err + } + if rr == nil || len(rr.KVs) == 0 { + return errors.ErrKeyNotFound + } + } + if lease.LeaseID(req.Lease) != lease.NoLease { + if l := lessor.Lookup(lease.LeaseID(req.Lease)); l == nil { + return lease.ErrLeaseNotFound + } + } + return nil +} + +func checkRange(rv mvcc.ReadView, req *pb.RangeRequest) error { + switch { + case req.Revision == 0: + return nil + case req.Revision > rv.Rev(): + return mvcc.ErrFutureRev + case req.Revision < rv.FirstRev(): + return mvcc.ErrCompacted + } + return nil +} + +func checkTxn(rv mvcc.ReadView, rt *pb.TxnRequest, lessor lease.Lessor, txnPath []bool) (int, error) { + txnCount := 0 + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + for _, req := range reqs { + var err error + var txns int + switch tv := req.Request.(type) { + case *pb.RequestOp_RequestRange: + err = checkRange(rv, tv.RequestRange) + case *pb.RequestOp_RequestPut: + err = checkPut(rv, lessor, tv.RequestPut) + case *pb.RequestOp_RequestDeleteRange: + case *pb.RequestOp_RequestTxn: + txns, err = checkTxn(rv, tv.RequestTxn, lessor, txnPath[1:]) + txnCount += txns + 1 + txnPath = txnPath[txns+1:] + default: + // empty union + } + if err != nil { + return 0, err + } + } + return txnCount, nil +} + +// mkGteRange determines if the range end is a >= range. This works around grpc +// sending empty byte strings as nil; >= is encoded in the range end as '\0'. +// If it is a GTE range, then []byte{} is returned to indicate the empty byte +// string (vs nil being no byte string). 
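The comment above documents the wire encoding that `mkGteRange` (next hunk) normalizes: a nil range end names a single key, a one-byte `"\x00"` means every key at or above the start, and anything else is a half-open interval. A small sketch of the three shapes (`describeRange` is illustrative, assuming the standard etcd range encoding):

```go
package main

import "fmt"

// describeRange shows the three range-end encodings the comment above
// documents: nil = single key, "\x00" = every key >= start, anything
// else = the half-open interval [start, end).
func describeRange(start, end []byte) string {
	switch {
	case end == nil:
		return fmt.Sprintf("the single key %q", start)
	case len(end) == 1 && end[0] == 0:
		return fmt.Sprintf("all keys >= %q", start)
	default:
		return fmt.Sprintf("keys in [%q, %q)", start, end)
	}
}

func main() {
	fmt.Println(describeRange([]byte("a"), nil))
	fmt.Println(describeRange([]byte("a"), []byte{0}))
	fmt.Println(describeRange([]byte("a"), []byte("b")))
}
```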
+func mkGteRange(rangeEnd []byte) []byte { + if len(rangeEnd) == 1 && rangeEnd[0] == 0 { + return []byte{} + } + return rangeEnd +} + +func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { + j := 0 + for i := range rr.KVs { + rr.KVs[j] = rr.KVs[i] + if !isPrunable(&rr.KVs[i]) { + j++ + } + } + rr.KVs = rr.KVs[:j] +} + +type kvSort struct{ kvs []mvccpb.KeyValue } + +func (s *kvSort) Swap(i, j int) { + t := s.kvs[i] + s.kvs[i] = s.kvs[j] + s.kvs[j] = t +} +func (s *kvSort) Len() int { return len(s.kvs) } + +type kvSortByKey struct{ *kvSort } + +func (s *kvSortByKey) Less(i, j int) bool { + return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0 +} + +type kvSortByVersion struct{ *kvSort } + +func (s *kvSortByVersion) Less(i, j int) bool { + return (s.kvs[i].Version - s.kvs[j].Version) < 0 +} + +type kvSortByCreate struct{ *kvSort } + +func (s *kvSortByCreate) Less(i, j int) bool { + return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0 +} + +type kvSortByMod struct{ *kvSort } + +func (s *kvSortByMod) Less(i, j int) bool { + return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0 +} + +type kvSortByValue struct{ *kvSort } + +func (s *kvSortByValue) Less(i, j int) bool { + return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 +} + +func compareInt64(a, b int64) int { + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } +} + +func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool { + txnPath := make([]bool, 1) + ops := rt.Success + if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] { + ops = rt.Failure + } + for _, op := range ops { + tv, ok := op.Request.(*pb.RequestOp_RequestTxn) + if !ok || tv.RequestTxn == nil { + continue + } + txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...) + } + return txnPath +} + +func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool { + for _, c := range cmps { + if !applyCompare(rv, c) { + return false + } + } + return true +} + +// applyCompare applies the compare request. +// If the comparison succeeds, it returns true. Otherwise, returns false. +func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { + // TODO: possible optimizations + // * chunk reads for large ranges to conserve memory + // * rewrite rules for common patterns: + // ex. 
"[a, b) createrev > 0" => "limit 1 /\ kvs > 0" + // * caching + rr, err := rv.Range(context.TODO(), c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{}) + if err != nil { + return false + } + if len(rr.KVs) == 0 { + if c.Target == pb.Compare_VALUE { + // Always fail if comparing a value on a key/keys that doesn't exist; + // nil == empty string in grpc; no way to represent missing value + return false + } + return compareKV(c, mvccpb.KeyValue{}) + } + for _, kv := range rr.KVs { + if !compareKV(c, kv) { + return false + } + } + return true +} + +func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool { + var result int + rev := int64(0) + switch c.Target { + case pb.Compare_VALUE: + var v []byte + if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil { + v = tv.Value + } + result = bytes.Compare(ckv.Value, v) + case pb.Compare_CREATE: + if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil { + rev = tv.CreateRevision + } + result = compareInt64(ckv.CreateRevision, rev) + case pb.Compare_MOD: + if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil { + rev = tv.ModRevision + } + result = compareInt64(ckv.ModRevision, rev) + case pb.Compare_VERSION: + if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil { + rev = tv.Version + } + result = compareInt64(ckv.Version, rev) + case pb.Compare_LEASE: + if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil { + rev = tv.Lease + } + result = compareInt64(ckv.Lease, rev) + } + switch c.Result { + case pb.Compare_EQUAL: + return result == 0 + case pb.Compare_NOT_EQUAL: + return result != 0 + case pb.Compare_GREATER: + return result > 0 + case pb.Compare_LESS: + return result < 0 + } + return true +} + +func IsTxnSerializable(r *pb.TxnRequest) bool { + for _, u := range r.Success { + if r := u.GetRequestRange(); r == nil || !r.Serializable { + return false + } + } + for _, u := range r.Failure { + if r := u.GetRequestRange(); r == nil || !r.Serializable { + return false + } + } + return true +} + +func IsTxnReadonly(r *pb.TxnRequest) bool { + for _, u := range r.Success { + if r := u.GetRequestRange(); r == nil { + return false + } + } + for _, u := range r.Failure { + if r := u.GetRequestRange(); r == nil { + return false + } + } + return true +} + +func CheckTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { + for _, c := range rt.Compare { + if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil { + return err + } + } + if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { + return err + } + return checkTxnReqsPermission(as, ai, rt.Failure) +} + +func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { + for _, requ := range reqs { + switch tv := requ.Request.(type) { + case *pb.RequestOp_RequestRange: + if tv.RequestRange == nil { + continue + } + + if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil { + return err + } + + case *pb.RequestOp_RequestPut: + if tv.RequestPut == nil { + continue + } + + if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil { + return err + } + + case *pb.RequestOp_RequestDeleteRange: + if tv.RequestDeleteRange == nil { + continue + } + + if tv.RequestDeleteRange.PrevKv { + err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) + if err != nil { + return err + } + } + + err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) + if err != nil { + return err + } + } + } + + return nil +} 
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/util.go new file mode 100644 index 0000000000000..f9987c6d5d1c4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/util.go @@ -0,0 +1,107 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package txn + +import ( + "fmt" + "reflect" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "go.uber.org/zap" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" +) + +func WarnOfExpensiveRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { + if time.Since(now) <= warningApplyDuration { + return + } + var resp string + if !isNil(respMsg) { + resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) + } + warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "", resp, err) +} + +func WarnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { + var resp string + if !isNil(respMsg) { + resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) + } + d := time.Since(now) + lg.Warn( + "failed to apply request", + zap.Duration("took", d), + zap.String("request", reqStringer.String()), + zap.String("response", resp), + zap.Error(err), + ) +} + +func WarnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) { + if time.Since(now) <= warningApplyDuration { + return + } + reqStringer := pb.NewLoggableTxnRequest(r) + var resp string + if !isNil(txnResponse) { + var resps []string + for _, r := range txnResponse.Responses { + switch r.Response.(type) { + case *pb.ResponseOp_ResponseRange: + if op := r.GetResponseRange(); op != nil { + resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.GetKvs()))) + } else { + resps = append(resps, "range_response:nil") + } + default: + // only range responses should be in a read only txn request + } + } + resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), txnResponse.Size()) + } + warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only txn ", resp, err) +} + +func WarnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) { + if time.Since(now) <= warningApplyDuration { + return + } + var resp string + if !isNil(rangeResponse) { + resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), rangeResponse.Size()) + } + warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only range ", resp, err) +} + +// callers need make sure time has passed warningApplyDuration +func warnOfExpensiveGenericRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) { + 
lg.Warn( + "apply request took too long", + zap.Duration("took", time.Since(now)), + zap.Duration("expected-duration", warningApplyDuration), + zap.String("prefix", prefix), + zap.String("request", reqStringer.String()), + zap.String("response", resp), + zap.Error(err), + ) + slowApplies.Inc() +} + +func isNil(msg proto.Message) bool { + return msg == nil || reflect.ValueOf(msg).IsNil() +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/util.go index b048b86416e2c..fbba5491b0714 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/util.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/util.go @@ -16,17 +16,11 @@ package etcdserver import ( "fmt" - "reflect" - "strings" "time" - "github.com/golang/protobuf/proto" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - - "go.uber.org/zap" ) // isConnectedToQuorumSince checks whether the local member is connected to the @@ -103,86 +97,6 @@ func (nc *notifier) notify(err error) { close(nc.c) } -func warnOfExpensiveRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { - if time.Since(now) <= warningApplyDuration { - return - } - var resp string - if !isNil(respMsg) { - resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) - } - warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "", resp, err) -} - -func warnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { - var resp string - if !isNil(respMsg) { - resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) - } - d := time.Since(now) - lg.Warn( - "failed to apply request", - zap.Duration("took", d), - zap.String("request", reqStringer.String()), - zap.String("response", resp), - zap.Error(err), - ) -} - -func warnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) { - if time.Since(now) <= warningApplyDuration { - return - } - reqStringer := pb.NewLoggableTxnRequest(r) - var resp string - if !isNil(txnResponse) { - var resps []string - for _, r := range txnResponse.Responses { - switch op := r.Response.(type) { - case *pb.ResponseOp_ResponseRange: - if op.ResponseRange != nil { - resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.ResponseRange.Kvs))) - } else { - resps = append(resps, "range_response:nil") - } - default: - // only range responses should be in a read only txn request - } - } - resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), txnResponse.Size()) - } - warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only txn ", resp, err) -} - -func warnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) { - if time.Since(now) <= warningApplyDuration { - return - } - var resp string - if !isNil(rangeResponse) { - resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), rangeResponse.Size()) - } - warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only range ", resp, err) -} - -// callers need make sure time has passed warningApplyDuration -func warnOfExpensiveGenericRequest(lg *zap.Logger, warningApplyDuration 
time.Duration, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) { - lg.Warn( - "apply request took too long", - zap.Duration("took", time.Since(now)), - zap.Duration("expected-duration", warningApplyDuration), - zap.String("prefix", prefix), - zap.String("request", reqStringer.String()), - zap.String("response", resp), - zap.Error(err), - ) - slowApplies.Inc() -} - -func isNil(msg proto.Message) bool { - return msg == nil || reflect.ValueOf(msg).IsNil() -} - // panicAlternativeStringer wraps a fmt.Stringer, and if calling String() panics, calls the alternative instead. // This is needed to ensure logging slow v2 requests does not panic, which occurs when running integration tests // with the embedded server with github.com/golang/protobuf v1.4.0+. See https://github.com/etcd-io/etcd/issues/12197. diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/v2_server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/v2_server.go index 24c97c924a5e2..8636204b544cc 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/v2_server.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/v2_server.go @@ -15,151 +15,11 @@ package etcdserver import ( - "context" - "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" ) type RequestV2 pb.Request -type RequestV2Handler interface { - Post(ctx context.Context, r *RequestV2) (Response, error) - Put(ctx context.Context, r *RequestV2) (Response, error) - Delete(ctx context.Context, r *RequestV2) (Response, error) - QGet(ctx context.Context, r *RequestV2) (Response, error) - Get(ctx context.Context, r *RequestV2) (Response, error) - Head(ctx context.Context, r *RequestV2) (Response, error) -} - -type reqV2HandlerEtcdServer struct { - reqV2HandlerStore - s *EtcdServer -} - -type reqV2HandlerStore struct { - store v2store.Store - applier ApplierV2 -} - -func NewStoreRequestV2Handler(s v2store.Store, applier ApplierV2) RequestV2Handler { - return &reqV2HandlerStore{s, applier} -} - -func (a *reqV2HandlerStore) Post(ctx context.Context, r *RequestV2) (Response, error) { - return a.applier.Post(r), nil -} - -func (a *reqV2HandlerStore) Put(ctx context.Context, r *RequestV2) (Response, error) { - return a.applier.Put(r, membership.ApplyBoth), nil -} - -func (a *reqV2HandlerStore) Delete(ctx context.Context, r *RequestV2) (Response, error) { - return a.applier.Delete(r), nil -} - -func (a *reqV2HandlerStore) QGet(ctx context.Context, r *RequestV2) (Response, error) { - return a.applier.QGet(r), nil -} - -func (a *reqV2HandlerStore) Get(ctx context.Context, r *RequestV2) (Response, error) { - if r.Wait { - wc, err := a.store.Watch(r.Path, r.Recursive, r.Stream, r.Since) - return Response{Watcher: wc}, err - } - ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted) - return Response{Event: ev}, err -} - -func (a *reqV2HandlerStore) Head(ctx context.Context, r *RequestV2) (Response, error) { - ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted) - return Response{Event: ev}, err -} - -func (a *reqV2HandlerEtcdServer) Post(ctx context.Context, r *RequestV2) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *reqV2HandlerEtcdServer) Put(ctx context.Context, r *RequestV2) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *reqV2HandlerEtcdServer) Delete(ctx context.Context, r *RequestV2) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *reqV2HandlerEtcdServer) QGet(ctx 
context.Context, r *RequestV2) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *RequestV2) (Response, error) { - data, err := ((*pb.Request)(r)).Marshal() - if err != nil { - return Response{}, err - } - ch := a.s.w.Register(r.ID) - - start := time.Now() - a.s.r.Propose(ctx, data) - proposalsPending.Inc() - defer proposalsPending.Dec() - - select { - case x := <-ch: - resp := x.(Response) - return resp, resp.Err - case <-ctx.Done(): - proposalsFailed.Inc() - a.s.w.Trigger(r.ID, nil) // GC wait - return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start) - case <-a.s.stopping: - } - return Response{}, ErrStopped -} - -func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { - r.ID = s.reqIDGen.Next() - h := &reqV2HandlerEtcdServer{ - reqV2HandlerStore: reqV2HandlerStore{ - store: s.v2store, - applier: s.applyV2, - }, - s: s, - } - rp := &r - resp, err := ((*RequestV2)(rp)).Handle(ctx, h) - resp.Term, resp.Index = s.Term(), s.CommittedIndex() - return resp, err -} - -// Handle interprets r and performs an operation on s.store according to r.Method -// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with -// Quorum == true, r will be sent through consensus before performing its -// respective operation. Do will block until an action is performed or there is -// an error. -func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Response, error) { - if r.Method == "GET" && r.Quorum { - r.Method = "QGET" - } - switch r.Method { - case "POST": - return v2api.Post(ctx, r) - case "PUT": - return v2api.Put(ctx, r) - case "DELETE": - return v2api.Delete(ctx, r) - case "QGET": - return v2api.QGet(ctx, r) - case "GET": - return v2api.Get(ctx, r) - case "HEAD": - return v2api.Head(ctx, r) - } - return Response{}, ErrUnknownMethod -} - func (r *RequestV2) String() string { rpb := pb.Request(*r) return rpb.String() diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go index a9e1f3586afe4..803f931f5f78b 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go @@ -19,28 +19,32 @@ import ( "context" "encoding/base64" "encoding/binary" + errorspkg "errors" "strconv" "time" + "github.com/gogo/protobuf/proto" + "go.uber.org/zap" + "golang.org/x/crypto/bcrypt" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/membershippb" + "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" + "go.etcd.io/etcd/server/v3/etcdserver/errors" + "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease/leasehttp" - "go.etcd.io/etcd/server/v3/mvcc" - - "github.com/gogo/protobuf/proto" - "go.uber.org/zap" - "golang.org/x/crypto/bcrypt" + "go.etcd.io/etcd/server/v3/storage/mvcc" + "go.etcd.io/raft/v3" ) const ( // In the health case, there might be a small gap (10s of entries) between // the applied index and committed index. - // However, if the committed entries are very heavy to apply, the gap might grow. + // However, if the committed entries are very heavy to toApply, the gap might grow. // We should stop accepting new proposals if the gap growing to a certain point. 
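The comment above motivates the constant that follows: if the apply loop falls too far behind the raft commit index, the server sheds new proposals rather than queueing unboundedly. A sketch of that admission check (`admitProposal`, `errTooManyRequests`, and `maxGap` are illustrative stand-ins; the real gate lives in `processInternalRaftRequestOnce` further down):

```go
package main

import (
	"errors"
	"fmt"
)

var errTooManyRequests = errors.New("etcdserver: too many requests")

const maxGap = 5000 // mirrors maxGapBetweenApplyAndCommitIndex

// admitProposal sheds load when committed entries have outrun the
// appliers by more than maxGap entries, instead of queueing more.
func admitProposal(appliedIndex, committedIndex uint64) error {
	if committedIndex > appliedIndex+maxGap {
		return errTooManyRequests
	}
	return nil
}

func main() {
	fmt.Println(admitProposal(100, 200))  // <nil>: healthy gap
	fmt.Println(admitProposal(100, 6000)) // too many requests: backpressure
}
```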
maxGapBetweenApplyAndCommitIndex = 5000 traceThreshold = 100 * time.Millisecond @@ -60,9 +64,9 @@ type RaftKV interface { } type Lessor interface { - // LeaseGrant sends LeaseGrant request to raft and apply it after committed. + // LeaseGrant sends LeaseGrant request to raft and toApply it after committed. LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - // LeaseRevoke sends LeaseRevoke request to raft and apply it after committed. + // LeaseRevoke sends LeaseRevoke request to raft and toApply it after committed. LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) // LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error @@ -102,12 +106,12 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe traceutil.Field{Key: "range_begin", Value: string(r.Key)}, traceutil.Field{Key: "range_end", Value: string(r.RangeEnd)}, ) - ctx = context.WithValue(ctx, traceutil.TraceKey, trace) + ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace) var resp *pb.RangeResponse var err error defer func(start time.Time) { - warnOfExpensiveReadOnlyRangeRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err) + txn.WarnOfExpensiveReadOnlyRangeRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err) if resp != nil { trace.AddField( traceutil.Field{Key: "response_count", Value: len(resp.Kvs)}, @@ -128,7 +132,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) } - get := func() { resp, err = s.applyV3Base.Range(ctx, nil, r) } + get := func() { resp, _, err = txn.Range(ctx, s.Logger(), s.KV(), r) } if serr := s.doSerialize(ctx, chk, get); serr != nil { err = serr return nil, err @@ -137,7 +141,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe } func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now()) + ctx = context.WithValue(ctx, traceutil.StartTimeKey{}, time.Now()) resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) if err != nil { return nil, err @@ -154,13 +158,13 @@ func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) } func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if isTxnReadonly(r) { + if txn.IsTxnReadonly(r) { trace := traceutil.New("transaction", s.Logger(), traceutil.Field{Key: "read_only", Value: true}, ) - ctx = context.WithValue(ctx, traceutil.TraceKey, trace) - if !isTxnSerializable(r) { + ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace) + if !txn.IsTxnSerializable(r) { err := s.linearizableReadNotify(ctx) trace.Step("agreement among raft nodes before linearized reading") if err != nil { @@ -170,22 +174,24 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse var resp *pb.TxnResponse var err error chk := func(ai *auth.AuthInfo) error { - return checkTxnAuth(s.authStore, ai, r) + return txn.CheckTxnAuth(s.authStore, ai, r) } defer func(start time.Time) { - warnOfExpensiveReadOnlyTxnRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err) + txn.WarnOfExpensiveReadOnlyTxnRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err) trace.LogIfLong(traceThreshold) }(time.Now()) - get := func() { resp, _, err = s.applyV3Base.Txn(ctx, r) } + get := func() { + resp, _, err = 
txn.Txn(ctx, s.Logger(), r, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.KV(), s.lessor) + } if serr := s.doSerialize(ctx, chk, get); serr != nil { return nil, serr } return resp, err } - ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now()) + ctx = context.WithValue(ctx, traceutil.StartTimeKey{}, time.Now()) resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) if err != nil { return nil, err @@ -193,64 +199,40 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse return resp.(*pb.TxnResponse), nil } -func isTxnSerializable(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - return true -} - -func isTxnReadonly(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil { - return false - } - } - return true -} - func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { startTime := time.Now() result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) trace := traceutil.TODO() - if result != nil && result.trace != nil { - trace = result.trace + if result != nil && result.Trace != nil { + trace = result.Trace defer func() { trace.LogIfLong(traceThreshold) }() - applyStart := result.trace.GetStartTime() - result.trace.SetStartTime(startTime) + applyStart := result.Trace.GetStartTime() + result.Trace.SetStartTime(startTime) trace.InsertStep(0, applyStart, "process raft request") } - if r.Physical && result != nil && result.physc != nil { - <-result.physc + if r.Physical && result != nil && result.Physc != nil { + <-result.Physc // The compaction is done deleting keys; the hash is now settled // but the data is not necessarily committed. If there's a crash, // the hash may revert to a hash prior to compaction completing // if the compaction resumes. Force the finished compaction to // commit so it won't resume following a crash. + // + // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock. 
+ s.bemu.RLock() s.be.ForceCommit() - trace.Step("physically apply compaction") + s.bemu.RUnlock() + trace.Step("physically toApply compaction") } if err != nil { return nil, err } - if result.err != nil { - return nil, result.err + if result.Err != nil { + return nil, result.Err } - resp := result.resp.(*pb.CompactionResponse) + resp := result.Resp.(*pb.CompactionResponse) if resp == nil { resp = &pb.CompactionResponse{} } @@ -279,9 +261,9 @@ func (s *EtcdServer) waitAppliedIndex() error { select { case <-s.ApplyWait(): case <-s.stopping: - return ErrStopped + return errors.ErrStopped case <-time.After(applyTimeout): - return ErrTimeoutWaitAppliedIndex + return errors.ErrTimeoutWaitAppliedIndex } return nil @@ -307,7 +289,6 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e if !s.ensureLeadership() { return -1, lease.ErrNotPrimary } - if err := s.waitAppliedIndex(); err != nil { return 0, err } @@ -316,7 +297,7 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e if err == nil { // already requested to primary lessor(leader) return ttl, nil } - if err != lease.ErrNotPrimary { + if !errorspkg.Is(err, lease.ErrNotPrimary) { return -1, err } } @@ -333,7 +314,7 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e for _, url := range leader.PeerURLs { lurl := url + leasehttp.LeasePrefix ttl, err := leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt) - if err == nil || err == lease.ErrLeaseNotFound { + if err == nil || errorspkg.Is(err, lease.ErrLeaseNotFound) { return ttl, err } } @@ -341,10 +322,10 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e time.Sleep(50 * time.Millisecond) } - if cctx.Err() == context.DeadlineExceeded { - return -1, ErrTimeout + if errorspkg.Is(cctx.Err(), context.DeadlineExceeded) { + return -1, errors.ErrTimeout } - return -1, ErrCanceled + return -1, errors.ErrCanceled } func (s *EtcdServer) checkLeaseTimeToLive(ctx context.Context, leaseID lease.LeaseID) (uint64, error) { @@ -402,7 +383,7 @@ func (s *EtcdServer) leaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR if le.Demoted() { // NOTE: lease.ErrNotPrimary is not retryable error for // client. Instead, uses ErrLeaderChanged. - return nil, ErrLeaderChanged + return nil, errors.ErrLeaderChanged } return resp, nil } @@ -422,16 +403,16 @@ func (s *EtcdServer) leaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR if err == nil { return resp.LeaseTimeToLiveResponse, nil } - if err == lease.ErrLeaseNotFound { + if errorspkg.Is(err, lease.ErrLeaseNotFound) { return nil, err } } } - if cctx.Err() == context.DeadlineExceeded { - return nil, ErrTimeout + if errorspkg.Is(cctx.Err(), context.DeadlineExceeded) { + return nil, errors.ErrTimeout } - return nil, ErrCanceled + return nil, errors.ErrCanceled } func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { @@ -458,14 +439,23 @@ func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR return resp, nil } +func (s *EtcdServer) newHeader() *pb.ResponseHeader { + return &pb.ResponseHeader{ + ClusterId: uint64(s.cluster.ID()), + MemberId: uint64(s.MemberID()), + Revision: s.KV().Rev(), + RaftTerm: s.Term(), + } +} + // LeaseLeases is really ListLeases !??? 
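The `bemu` changes in this block and earlier (an `RLock` around `ForceCommit`, `Backend()` downgraded from `Lock` to `RLock`) follow from one invariant: many readers may use the current backend concurrently, but `applySnapshot` swaps in a whole new instance under the write lock. The pattern in miniature (`backendHolder` is an illustrative stand-in, with a string in place of `backend.Backend`):

```go
package main

import (
	"fmt"
	"sync"
)

// backendHolder sketches the s.bemu / s.be pattern: readers take RLock
// to use the current backend, the snapshot path takes Lock to swap it.
type backendHolder struct {
	mu sync.RWMutex
	be string // stand-in for backend.Backend
}

func (h *backendHolder) Backend() string {
	h.mu.RLock()
	defer h.mu.RUnlock()
	return h.be
}

func (h *backendHolder) swap(newBE string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.be = newBE
}

func main() {
	h := &backendHolder{be: "db-1"}
	fmt.Println(h.Backend()) // db-1
	h.swap("db-2")           // what applySnapshot does with a new backend
	fmt.Println(h.Backend()) // db-2
}
```

Using `sync.RWMutex` here lets hot read paths proceed in parallel and only serializes against the rare snapshot-driven swap.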
-func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { +func (s *EtcdServer) LeaseLeases(_ context.Context, _ *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { ls := s.lessor.Leases() lss := make([]*pb.LeaseStatus, len(ls)) for i := range ls { lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)} } - return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil + return &pb.LeaseLeasesResponse{Header: s.newHeader(), Leases: lss}, nil } func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) { @@ -477,13 +467,13 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) case <-time.After(dur): leader = s.cluster.Member(s.Leader()) case <-s.stopping: - return nil, ErrStopped + return nil, errors.ErrStopped case <-ctx.Done(): - return nil, ErrNoLeader + return nil, errors.ErrNoLeader } } - if leader == nil || len(leader.PeerURLs) == 0 { - return nil, ErrNoLeader + if len(leader.PeerURLs) == 0 { + return nil, errors.ErrNoLeader } return leader, nil } @@ -538,7 +528,7 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest for { checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) if err != nil { - if err != auth.ErrAuthNotEnabled { + if !errorspkg.Is(err, auth.ErrAuthNotEnabled) { lg.Warn( "invalid authentication was requested", zap.String("user", r.Name), @@ -701,19 +691,19 @@ func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftReque if err != nil { return nil, err } - if result.err != nil { - return nil, result.err + if result.Err != nil { + return nil, result.Err } - if startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time); ok && result.trace != nil { - applyStart := result.trace.GetStartTime() - // The trace object is created in apply. Here reset the start time to trace + if startTime, ok := ctx.Value(traceutil.StartTimeKey{}).(time.Time); ok && result.Trace != nil { + applyStart := result.Trace.GetStartTime() + // The trace object is created in toApply. 
Here reset the start time to trace // the raft request time by the difference between the request start time - // and apply start time - result.trace.SetStartTime(startTime) - result.trace.InsertStep(0, applyStart, "process raft request") - result.trace.LogIfLong(traceThreshold) + // and toApply start time + result.Trace.SetStartTime(startTime) + result.Trace.InsertStep(0, applyStart, "process raft request") + result.Trace.LogIfLong(traceThreshold) } - return result.resp, nil + return result.Resp, nil } func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { @@ -745,11 +735,11 @@ func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) e return nil } -func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { +func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*apply2.Result, error) { ai := s.getAppliedIndex() ci := s.getCommittedIndex() if ci > ai+maxGapBetweenApplyAndCommitIndex { - return nil, ErrTooManyRequests + return nil, errors.ErrTooManyRequests } r.Header = &pb.RequestHeader{ @@ -774,7 +764,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In } if len(data) > int(s.Cfg.MaxRequestBytes) { - return nil, ErrRequestTooLarge + return nil, errors.ErrRequestTooLarge } id := r.ID @@ -798,13 +788,13 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In select { case x := <-ch: - return x.(*applyResult), nil + return x.(*apply2.Result), nil case <-cctx.Done(): proposalsFailed.Inc() s.w.Trigger(id, nil) // GC wait return nil, s.parseProposeCtxErr(cctx.Err(), start) case <-s.done: - return nil, ErrStopped + return nil, errors.ErrStopped } } @@ -813,8 +803,8 @@ func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } func (s *EtcdServer) linearizableReadLoop() { for { - requestId := s.reqIDGen.Next() - leaderChangedNotifier := s.LeaderChangedNotify() + requestID := s.reqIDGen.Next() + leaderChangedNotifier := s.leaderChanged.Receive() select { case <-leaderChangedNotifier: continue @@ -833,7 +823,7 @@ func (s *EtcdServer) linearizableReadLoop() { s.readNotifier = nextnr s.readMu.Unlock() - confirmedIndex, err := s.requestCurrentIndex(leaderChangedNotifier, requestId) + confirmedIndex, err := s.requestCurrentIndex(leaderChangedNotifier, requestID) if isStopped(err) { return } @@ -865,11 +855,11 @@ func (s *EtcdServer) linearizableReadLoop() { } func isStopped(err error) bool { - return err == raft.ErrStopped || err == ErrStopped + return errorspkg.Is(err, raft.ErrStopped) || errorspkg.Is(err, errors.ErrStopped) } -func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestId uint64) (uint64, error) { - err := s.sendReadIndex(requestId) +func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestID uint64) (uint64, error) { + err := s.sendReadIndex(requestID) if err != nil { return 0, err } @@ -880,24 +870,24 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, retryTimer := time.NewTimer(readIndexRetryTime) defer retryTimer.Stop() - firstCommitInTermNotifier := s.FirstCommitInTermNotify() + firstCommitInTermNotifier := s.firstCommitInTerm.Receive() for { select { case rs := <-s.r.readStateC: - requestIdBytes := uint64ToBigEndianBytes(requestId) - gotOwnResponse := bytes.Equal(rs.RequestCtx, requestIdBytes) + requestIDBytes := 
uint64ToBigEndianBytes(requestID) + gotOwnResponse := bytes.Equal(rs.RequestCtx, requestIDBytes) if !gotOwnResponse { // a previous request might time out. now we should ignore the response of it and // continue waiting for the response of the current requests. - responseId := uint64(0) + responseID := uint64(0) if len(rs.RequestCtx) == 8 { - responseId = binary.BigEndian.Uint64(rs.RequestCtx) + responseID = binary.BigEndian.Uint64(rs.RequestCtx) } lg.Warn( "ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader", - zap.Uint64("sent-request-id", requestId), - zap.Uint64("received-request-id", responseId), + zap.Uint64("sent-request-id", requestID), + zap.Uint64("received-request-id", responseID), ) slowReadIndex.Inc() continue @@ -906,11 +896,11 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, case <-leaderChangedNotifier: readIndexFailed.Inc() // return a retryable error. - return 0, ErrLeaderChanged + return 0, errors.ErrLeaderChanged case <-firstCommitInTermNotifier: - firstCommitInTermNotifier = s.FirstCommitInTermNotify() + firstCommitInTermNotifier = s.firstCommitInTerm.Receive() lg.Info("first commit in current term: resending ReadIndex request") - err := s.sendReadIndex(requestId) + err := s.sendReadIndex(requestID) if err != nil { return 0, err } @@ -919,10 +909,10 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, case <-retryTimer.C: lg.Warn( "waiting for ReadIndex response took too long, retrying", - zap.Uint64("sent-request-id", requestId), + zap.Uint64("sent-request-id", requestID), zap.Duration("retry-timeout", readIndexRetryTime), ) - err := s.sendReadIndex(requestId) + err := s.sendReadIndex(requestID) if err != nil { return 0, err } @@ -934,9 +924,9 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, zap.Duration("timeout", s.Cfg.ReqTimeout()), ) slowReadIndex.Inc() - return 0, ErrTimeout + return 0, errors.ErrTimeout case <-s.stopping: - return 0, ErrStopped + return 0, errors.ErrStopped } } } @@ -953,7 +943,7 @@ func (s *EtcdServer) sendReadIndex(requestIndex uint64) error { cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) err := s.r.ReadIndex(cctx, ctxToSend) cancel() - if err == raft.ErrStopped { + if errorspkg.Is(err, raft.ErrStopped) { return err } if err != nil { @@ -987,7 +977,7 @@ func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { case <-ctx.Done(): return ctx.Err() case <-s.done: - return ErrStopped + return errors.ErrStopped } } @@ -1012,7 +1002,7 @@ func (s *EtcdServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb case pb.DowngradeRequest_CANCEL: return s.downgradeCancel(ctx) default: - return nil, ErrUnknownMethod + return nil, errors.ErrUnknownMethod } } @@ -1024,73 +1014,40 @@ func (s *EtcdServer) downgradeValidate(ctx context.Context, v string) (*pb.Downg return nil, err } - // gets leaders commit index and wait for local store to finish applying that index - // to avoid using stale downgrade information - err = s.linearizableReadNotify(ctx) - if err != nil { - return nil, err - } - cv := s.ClusterVersion() if cv == nil { - return nil, ErrClusterVersionUnavailable + return nil, errors.ErrClusterVersionUnavailable } - resp.Version = cv.String() - - allowedTargetVersion := membership.AllowedDowngradeVersion(cv) - if !targetVersion.Equal(*allowedTargetVersion) { - return nil, ErrInvalidDowngradeTargetVersion + resp.Version = 
version.Cluster(cv.String()) + err = s.Version().DowngradeValidate(ctx, targetVersion) + if err != nil { + return nil, err } - downgradeInfo := s.cluster.DowngradeInfo() - if downgradeInfo.Enabled { - // Todo: return the downgrade status along with the error msg - return nil, ErrDowngradeInProcess - } return resp, nil } func (s *EtcdServer) downgradeEnable(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { - // validate downgrade capability before starting downgrade - v := r.Version lg := s.Logger() - if resp, err := s.downgradeValidate(ctx, v); err != nil { - lg.Warn("reject downgrade request", zap.Error(err)) - return resp, err - } - targetVersion, err := convertToClusterVersion(v) + targetVersion, err := convertToClusterVersion(r.Version) if err != nil { lg.Warn("reject downgrade request", zap.Error(err)) return nil, err } - - raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()} - _, err = s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) + err = s.Version().DowngradeEnable(ctx, targetVersion) if err != nil { lg.Warn("reject downgrade request", zap.Error(err)) return nil, err } - resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()} + resp := pb.DowngradeResponse{Version: version.Cluster(s.ClusterVersion().String())} return &resp, nil } func (s *EtcdServer) downgradeCancel(ctx context.Context) (*pb.DowngradeResponse, error) { - // gets leaders commit index and wait for local store to finish applying that index - // to avoid using stale downgrade information - if err := s.linearizableReadNotify(ctx); err != nil { - return nil, err - } - - downgradeInfo := s.cluster.DowngradeInfo() - if !downgradeInfo.Enabled { - return nil, ErrNoInflightDowngrade - } - - raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false} - _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) + err := s.Version().DowngradeCancel(ctx) if err != nil { - return nil, err + s.lg.Warn("failed to cancel downgrade", zap.Error(err)) } - resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()} + resp := pb.DowngradeResponse{Version: version.Cluster(s.ClusterVersion().String())} return &resp, nil } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/doc.go similarity index 81% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/doc.go rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/version/doc.go index 475c4b1f95a3b..c34f905119584 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/doc.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/doc.go @@ -1,4 +1,4 @@ -// Copyright 2015 The etcd Authors +// Copyright 2021 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package v2http provides etcd client and server implementations. -package v2http +// Package version provides functions for getting/saving storage version. 
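+// It also hosts the downgrade validation logic (Manager) and the leader-side version monitoring (Monitor) defined in this package.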
+package version diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/downgrade.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/downgrade.go similarity index 60% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/downgrade.go rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/version/downgrade.go index 9fdafe22aae9b..f2c6e11946796 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/downgrade.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/downgrade.go @@ -12,12 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -package membership +package version import ( "github.com/coreos/go-semver/semver" - "go.etcd.io/etcd/api/v3/version" "go.uber.org/zap" + + "go.etcd.io/etcd/api/v3/version" ) type DowngradeInfo struct { @@ -34,47 +35,42 @@ func (d *DowngradeInfo) GetTargetVersion() *semver.Version { // isValidDowngrade verifies whether the cluster can be downgraded from verFrom to verTo func isValidDowngrade(verFrom *semver.Version, verTo *semver.Version) bool { - return verTo.Equal(*AllowedDowngradeVersion(verFrom)) + return verTo.Equal(*allowedDowngradeVersion(verFrom)) } -// mustDetectDowngrade will detect unexpected downgrade when the local server is recovered. -func mustDetectDowngrade(lg *zap.Logger, cv *semver.Version, d *DowngradeInfo) { - lv := semver.Must(semver.NewVersion(version.Version)) +// MustDetectDowngrade will detect the local server joining a cluster that doesn't support its version. +func MustDetectDowngrade(lg *zap.Logger, sv, cv *semver.Version) { // only keep major.minor version for comparison against cluster version - lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} - - // if the cluster enables downgrade, check local version against downgrade target version. - if d != nil && d.Enabled && d.TargetVersion != "" { - if lv.Equal(*d.GetTargetVersion()) { - if cv != nil { - lg.Info( - "cluster is downgrading to target version", - zap.String("target-cluster-version", d.TargetVersion), - zap.String("determined-cluster-version", version.Cluster(cv.String())), - zap.String("current-server-version", version.Version), - ) - } - return - } - lg.Fatal( - "invalid downgrade; server version is not allowed to join when downgrade is enabled", - zap.String("current-server-version", version.Version), - zap.String("target-cluster-version", d.TargetVersion), - ) - } + sv = &semver.Version{Major: sv.Major, Minor: sv.Minor} // if the cluster disables downgrade, check local version against determined cluster version. // the validation passes when local version is not less than cluster version - if cv != nil && lv.LessThan(*cv) { - lg.Fatal( + if cv != nil && sv.LessThan(*cv) { + lg.Panic( "invalid downgrade; server version is lower than determined cluster version", - zap.String("current-server-version", version.Version), + zap.String("current-server-version", sv.String()), zap.String("determined-cluster-version", version.Cluster(cv.String())), ) } } -func AllowedDowngradeVersion(ver *semver.Version) *semver.Version { +func allowedDowngradeVersion(ver *semver.Version) *semver.Version { // Todo: handle the case that downgrading from higher major version(e.g. downgrade from v4.0 to v3.x) return &semver.Version{Major: ver.Major, Minor: ver.Minor - 1} } + +// IsValidClusterVersionChange checks the two scenarios in which a version change is valid: +// 1. Downgrade: cluster version is 1 minor version higher than local version, +// cluster version should change.
+// 2. Cluster start: when not all members' versions are available, the cluster version +// is set to MinVersion(3.0); when all members are at a higher version, the cluster +// version is lower than the minimal server version and should change. +func IsValidClusterVersionChange(verFrom *semver.Version, verTo *semver.Version) bool { + verFrom = &semver.Version{Major: verFrom.Major, Minor: verFrom.Minor} + verTo = &semver.Version{Major: verTo.Major, Minor: verTo.Minor} + + if isValidDowngrade(verFrom, verTo) || (verFrom.Major == verTo.Major && verFrom.LessThan(*verTo)) { + return true + } + return false +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/errors.go similarity index 60% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/doc.go rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/version/errors.go index 2ff372f18760b..906aa9f413f72 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/doc.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/errors.go @@ -1,4 +1,4 @@ -// Copyright 2017 The etcd Authors +// Copyright 2021 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,5 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package v2v3 provides a ServerV2 implementation backed by clientv3.Client. -package v2v3 +package version + +import "errors" + +var ( + ErrInvalidDowngradeTargetVersion = errors.New("etcdserver: invalid downgrade target version") + ErrDowngradeInProcess = errors.New("etcdserver: cluster has a downgrade job in progress") + ErrNoInflightDowngrade = errors.New("etcdserver: no inflight downgrade job") +) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/monitor.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/monitor.go new file mode 100644 index 0000000000000..15697e960c265 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/monitor.go @@ -0,0 +1,221 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "context" + "errors" + + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" + + "go.etcd.io/etcd/api/v3/version" +) + +// Monitor contains logic used by the cluster leader to monitor version changes and decide on cluster version or downgrade progress.
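+// Its UpdateClusterVersionIfNeeded, UpdateStorageVersionIfNeeded and CancelDowngradeIfNeeded checks each inspect member state and issue updates through the Server interface below.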
+type Monitor struct { + lg *zap.Logger + s Server +} + +// Server lists EtcdServer methods needed by Monitor. +type Server interface { + GetClusterVersion() *semver.Version + GetDowngradeInfo() *DowngradeInfo + GetMembersVersions() map[string]*version.Versions + UpdateClusterVersion(string) + LinearizableReadNotify(ctx context.Context) error + DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error + DowngradeCancel(ctx context.Context) error + + GetStorageVersion() *semver.Version + UpdateStorageVersion(semver.Version) error +} + +func NewMonitor(lg *zap.Logger, storage Server) *Monitor { + return &Monitor{ + lg: lg, + s: storage, + } +} + +// UpdateClusterVersionIfNeeded updates the cluster version. +func (m *Monitor) UpdateClusterVersionIfNeeded() error { + newClusterVersion, err := m.decideClusterVersion() + if newClusterVersion != nil { + newClusterVersion = &semver.Version{Major: newClusterVersion.Major, Minor: newClusterVersion.Minor} + m.s.UpdateClusterVersion(newClusterVersion.String()) + } + return err +} + +// decideClusterVersion decides whether to change cluster version and its next value. +// The new cluster version is based on the members' server versions and whether the cluster is downgrading. +// Returns nil if cluster version should be left unchanged. +func (m *Monitor) decideClusterVersion() (*semver.Version, error) { + clusterVersion := m.s.GetClusterVersion() + minimalServerVersion := m.membersMinimalServerVersion() + if clusterVersion == nil { + if minimalServerVersion != nil { + return minimalServerVersion, nil + } + return semver.New(version.MinClusterVersion), nil + } + if minimalServerVersion == nil { + return nil, nil + } + downgrade := m.s.GetDowngradeInfo() + if downgrade != nil && downgrade.Enabled { + if downgrade.GetTargetVersion().Equal(*clusterVersion) { + return nil, nil + } + if !isValidDowngrade(clusterVersion, downgrade.GetTargetVersion()) { + m.lg.Error("Cannot downgrade from cluster-version to downgrade-target", + zap.String("downgrade-target", downgrade.TargetVersion), + zap.String("cluster-version", clusterVersion.String()), + ) + return nil, errors.New("invalid downgrade target") + } + if !isValidDowngrade(minimalServerVersion, downgrade.GetTargetVersion()) { + m.lg.Error("Cannot downgrade from minimal-server-version to downgrade-target", + zap.String("downgrade-target", downgrade.TargetVersion), + zap.String("minimal-server-version", minimalServerVersion.String()), + ) + return nil, errors.New("invalid downgrade target") + } + return downgrade.GetTargetVersion(), nil + } + if clusterVersion.LessThan(*minimalServerVersion) && IsValidClusterVersionChange(clusterVersion, minimalServerVersion) { + return minimalServerVersion, nil + } + return nil, nil +} + +// UpdateStorageVersionIfNeeded updates the storage version if it differs from cluster version.
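+// Only the major.minor part of the cluster version is persisted as the storage version.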
+func (m *Monitor) UpdateStorageVersionIfNeeded() { + cv := m.s.GetClusterVersion() + if cv == nil { + return + } + sv := m.s.GetStorageVersion() + + if sv == nil || sv.Major != cv.Major || sv.Minor != cv.Minor { + if sv != nil { + m.lg.Info("cluster version differs from storage version.", zap.String("cluster-version", cv.String()), zap.String("storage-version", sv.String())) + } + err := m.s.UpdateStorageVersion(semver.Version{Major: cv.Major, Minor: cv.Minor}) + if err != nil { + m.lg.Error("failed to update storage version", zap.String("cluster-version", cv.String()), zap.Error(err)) + return + } + d := m.s.GetDowngradeInfo() + if d != nil && d.Enabled { + m.lg.Info( + "The server is ready to downgrade", + zap.String("target-version", d.TargetVersion), + zap.String("server-version", version.Version), + ) + } + } +} + +func (m *Monitor) CancelDowngradeIfNeeded() { + d := m.s.GetDowngradeInfo() + if d == nil || !d.Enabled { + return + } + + targetVersion := d.TargetVersion + v := semver.Must(semver.NewVersion(targetVersion)) + if m.versionsMatchTarget(v) { + m.lg.Info("the cluster has been downgraded", zap.String("cluster-version", targetVersion)) + err := m.s.DowngradeCancel(context.Background()) + if err != nil { + m.lg.Warn("failed to cancel downgrade", zap.Error(err)) + } + } +} + +// membersMinimalServerVersion returns the min server version in the map, or nil if the min +// version is unknown. +// It logs a warning if there is a member with a higher version than the +// local version. +func (m *Monitor) membersMinimalServerVersion() *semver.Version { + vers := m.s.GetMembersVersions() + var minV *semver.Version + lv := semver.Must(semver.NewVersion(version.Version)) + + for mid, ver := range vers { + if ver == nil { + return nil + } + v, err := semver.NewVersion(ver.Server) + if err != nil { + m.lg.Warn( + "failed to parse server version of remote member", + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + zap.Error(err), + ) + return nil + } + if lv.LessThan(*v) { + m.lg.Warn( + "leader found higher-versioned member", + zap.String("local-member-version", lv.String()), + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + ) + } + if minV == nil { + minV = v + } else if v.LessThan(*minV) { + minV = v + } + } + return minV +} + +// versionsMatchTarget returns true if all server versions are equal to target version, otherwise returns false. +// It can be used to decide whether the cluster has finished downgrading to the target version.
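+// The comparison ignores the patch version; only major.minor has to match.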
+func (m *Monitor) versionsMatchTarget(targetVersion *semver.Version) bool { + vers := m.s.GetMembersVersions() + targetVersion = &semver.Version{Major: targetVersion.Major, Minor: targetVersion.Minor} + for mid, ver := range vers { + if ver == nil { + return false + } + v, err := semver.NewVersion(ver.Server) + if err != nil { + m.lg.Warn( + "failed to parse server version of remote member", + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + zap.Error(err), + ) + return false + } + v = &semver.Version{Major: v.Major, Minor: v.Minor} + if !targetVersion.Equal(*v) { + m.lg.Warn("remote server has mismatching etcd version", + zap.String("remote-member-id", mid), + zap.String("current-server-version", v.String()), + zap.String("target-version", targetVersion.String()), + ) + return false + } + } + return true +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/version.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/version.go new file mode 100644 index 0000000000000..0a2f99a1fafd4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/version.go @@ -0,0 +1,81 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "context" + + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" +) + +// Manager contains logic to manage the etcd cluster version downgrade process. +type Manager struct { + lg *zap.Logger + s Server +} + +// NewManager returns a new manager instance. +func NewManager(lg *zap.Logger, s Server) *Manager { + return &Manager{ + lg: lg, + s: s, + } +} + +// DowngradeValidate validates if the cluster is downgradable to the provided target version and returns an error if not. +func (m *Manager) DowngradeValidate(ctx context.Context, targetVersion *semver.Version) error { + // get the leader's commit index and wait for the local store to finish applying that index + // to avoid using stale downgrade information + err := m.s.LinearizableReadNotify(ctx) + if err != nil { + return err + } + cv := m.s.GetClusterVersion() + allowedTargetVersion := allowedDowngradeVersion(cv) + if !targetVersion.Equal(*allowedTargetVersion) { + return ErrInvalidDowngradeTargetVersion + } + + downgradeInfo := m.s.GetDowngradeInfo() + if downgradeInfo != nil && downgradeInfo.Enabled { + // TODO: return the downgrade status along with the error msg + return ErrDowngradeInProcess + } + return nil +} + +// DowngradeEnable initiates the etcd cluster version downgrade process. +func (m *Manager) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error { + // validate downgrade capability before starting downgrade + err := m.DowngradeValidate(ctx, targetVersion) + if err != nil { + return err + } + return m.s.DowngradeEnable(ctx, targetVersion) +} + +// DowngradeCancel cancels the ongoing downgrade process.
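+// Like DowngradeValidate, it first issues a linearizable read so it does not act on stale downgrade state.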
+func (m *Manager) DowngradeCancel(ctx context.Context) error { + err := m.s.LinearizableReadNotify(ctx) + if err != nil { + return err + } + downgradeInfo := m.s.GetDowngradeInfo() + if !downgradeInfo.Enabled { + return ErrNoInflightDowngrade + } + return m.s.DowngradeCancel(ctx) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go index e8174f396ff0e..7672bdfbc4c3d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go @@ -17,10 +17,10 @@ package etcdserver import ( "errors" - "go.etcd.io/etcd/raft/v3" - "go.uber.org/zap" "go.uber.org/zap/zapcore" + + "go.etcd.io/raft/v3" ) // NewRaftLogger builds "raft.Logger" from "*zap.Config". @@ -37,7 +37,8 @@ func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) { // NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger". func NewRaftLoggerZap(lg *zap.Logger) raft.Logger { - return &zapRaftLogger{lg: lg, sugar: lg.Sugar()} + skipCallerLg := lg.WithOptions(zap.AddCallerSkip(1)) + return &zapRaftLogger{lg: skipCallerLg, sugar: skipCallerLg.Sugar()} } // NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core" @@ -53,50 +54,50 @@ type zapRaftLogger struct { sugar *zap.SugaredLogger } -func (zl *zapRaftLogger) Debug(args ...interface{}) { +func (zl *zapRaftLogger) Debug(args ...any) { zl.sugar.Debug(args...) } -func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) { +func (zl *zapRaftLogger) Debugf(format string, args ...any) { zl.sugar.Debugf(format, args...) } -func (zl *zapRaftLogger) Error(args ...interface{}) { +func (zl *zapRaftLogger) Error(args ...any) { zl.sugar.Error(args...) } -func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) { +func (zl *zapRaftLogger) Errorf(format string, args ...any) { zl.sugar.Errorf(format, args...) } -func (zl *zapRaftLogger) Info(args ...interface{}) { +func (zl *zapRaftLogger) Info(args ...any) { zl.sugar.Info(args...) } -func (zl *zapRaftLogger) Infof(format string, args ...interface{}) { +func (zl *zapRaftLogger) Infof(format string, args ...any) { zl.sugar.Infof(format, args...) } -func (zl *zapRaftLogger) Warning(args ...interface{}) { +func (zl *zapRaftLogger) Warning(args ...any) { zl.sugar.Warn(args...) } -func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) { +func (zl *zapRaftLogger) Warningf(format string, args ...any) { zl.sugar.Warnf(format, args...) } -func (zl *zapRaftLogger) Fatal(args ...interface{}) { +func (zl *zapRaftLogger) Fatal(args ...any) { zl.sugar.Fatal(args...) } -func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) { +func (zl *zapRaftLogger) Fatalf(format string, args ...any) { zl.sugar.Fatalf(format, args...) } -func (zl *zapRaftLogger) Panic(args ...interface{}) { +func (zl *zapRaftLogger) Panic(args ...any) { zl.sugar.Panic(args...) } -func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) { +func (zl *zapRaftLogger) Panicf(format string, args ...any) { zl.sugar.Panicf(format, args...) } diff --git a/vendor/go.etcd.io/etcd/server/v3/features/etcd_features.go b/vendor/go.etcd.io/etcd/server/v3/features/etcd_features.go new file mode 100644 index 0000000000000..abb1aa171f8d0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/features/etcd_features.go @@ -0,0 +1,76 @@ +// Copyright 2024 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package features + +import ( + "fmt" + + "go.uber.org/zap" + + "go.etcd.io/etcd/pkg/v3/featuregate" +) + +const ( + // Every feature gate should be added here following this template: + // + // // owner: @username + // // kep: https://kep.k8s.io/NNN (or issue: https://github.com/etcd-io/etcd/issues/NNN, or main PR: https://github.com/etcd-io/etcd/pull/NNN) + // // alpha: v3.X + // MyFeature featuregate.Feature = "MyFeature" + // + // Feature gates should be listed in alphabetical, case-sensitive + // (upper before any lower case character) order. This reduces the risk + // of code conflicts because changes are more likely to be scattered + // across the file. + + // DistributedTracing enables experimental distributed tracing using OpenTelemetry Tracing. + // owner: @dashpole + // alpha: v3.5 + // issue: https://github.com/etcd-io/etcd/issues/12460 + DistributedTracing featuregate.Feature = "DistributedTracing" + // StopGRPCServiceOnDefrag enables the etcd gRPC service to stop serving client requests on defragmentation. + // owner: @chaochn47 + // alpha: v3.6 + // main PR: https://github.com/etcd-io/etcd/pull/18279 + StopGRPCServiceOnDefrag featuregate.Feature = "StopGRPCServiceOnDefrag" + // InitialCorruptCheck enables a data corruption check before serving any client/peer traffic. + // owner: @serathius + // alpha: v3.6 + // main PR: https://github.com/etcd-io/etcd/pull/10524 + InitialCorruptCheck featuregate.Feature = "InitialCorruptCheck" +) + +var ( + DefaultEtcdServerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ + DistributedTracing: {Default: false, PreRelease: featuregate.Alpha}, + StopGRPCServiceOnDefrag: {Default: false, PreRelease: featuregate.Alpha}, + InitialCorruptCheck: {Default: false, PreRelease: featuregate.Alpha}, + } + // ExperimentalFlagToFeatureMap is the map from the cmd line flags of experimental features + // to their corresponding feature gates. + // Deprecated: only add existing experimental features here. DO NOT use for new features. + ExperimentalFlagToFeatureMap = map[string]featuregate.Feature{ + "experimental-stop-grpc-service-on-defrag": StopGRPCServiceOnDefrag, + "experimental-initial-corrupt-check": InitialCorruptCheck, + } +) + +func NewDefaultServerFeatureGate(name string, lg *zap.Logger) featuregate.FeatureGate { + fg := featuregate.New(fmt.Sprintf("%sServerFeatureGate", name), lg) + if err := fg.Add(DefaultEtcdServerFeatureGates); err != nil { + panic(err) + } + return fg +} diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/lease.go b/vendor/go.etcd.io/etcd/server/v3/lease/lease.go new file mode 100644 index 0000000000000..95f3eb6f75683 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/lease/lease.go @@ -0,0 +1,135 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lease + +import ( + "math" + "sync" + "time" + + "go.etcd.io/etcd/server/v3/lease/leasepb" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" +) + +type Lease struct { + ID LeaseID + ttl int64 // time to live of the lease in seconds + remainingTTL int64 // remaining time to live in seconds, if zero valued it is considered unset and the full ttl should be used + // expiryMu protects concurrent accesses to expiry + expiryMu sync.RWMutex + // expiry is the time when the lease should expire; no expiration when expiry.IsZero() is true + expiry time.Time + + // mu protects concurrent accesses to itemSet + mu sync.RWMutex + itemSet map[LeaseItem]struct{} + revokec chan struct{} +} + +func NewLease(id LeaseID, ttl int64) *Lease { + return &Lease{ + ID: id, + ttl: ttl, + itemSet: make(map[LeaseItem]struct{}), + revokec: make(chan struct{}), + } +} + +func (l *Lease) expired() bool { + return l.Remaining() <= 0 +} + +func (l *Lease) persistTo(b backend.Backend) { + lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL} + tx := b.BatchTx() + tx.LockInsideApply() + defer tx.Unlock() + schema.MustUnsafePutLease(tx, &lpb) +} + +// TTL returns the TTL of the Lease. +func (l *Lease) TTL() int64 { + return l.ttl +} + +// SetLeaseItem sets the given lease item; this func is thread-safe +func (l *Lease) SetLeaseItem(item LeaseItem) { + l.mu.Lock() + defer l.mu.Unlock() + l.itemSet[item] = struct{}{} +} + +// getRemainingTTL returns the last checkpointed remaining TTL of the lease. +func (l *Lease) getRemainingTTL() int64 { + if l.remainingTTL > 0 { + return l.remainingTTL + } + return l.ttl +} + +// refresh refreshes the expiry of the lease. +func (l *Lease) refresh(extend time.Duration) { + newExpiry := time.Now().Add(extend + time.Duration(l.getRemainingTTL())*time.Second) + l.expiryMu.Lock() + defer l.expiryMu.Unlock() + l.expiry = newExpiry +} + +// forever sets the expiry of lease to be forever. +func (l *Lease) forever() { + l.expiryMu.Lock() + defer l.expiryMu.Unlock() + l.expiry = forever +} + +// Demoted returns true if the lease's expiry has been reset to forever. +func (l *Lease) Demoted() bool { + l.expiryMu.Lock() + defer l.expiryMu.Unlock() + return l.expiry == forever +} + +// Keys returns all the keys attached to the lease. +func (l *Lease) Keys() []string { + l.mu.RLock() + keys := make([]string, 0, len(l.itemSet)) + for k := range l.itemSet { + keys = append(keys, k.Key) + } + l.mu.RUnlock() + return keys +} + +// Remaining returns the remaining time of the lease. +func (l *Lease) Remaining() time.Duration { + l.expiryMu.RLock() + defer l.expiryMu.RUnlock() + if l.expiry.IsZero() { + return time.Duration(math.MaxInt64) + } + return time.Until(l.expiry) +} + +type LeaseItem struct { + Key string +} + +// leasesByExpiry implements sort.Interface.
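+// Leases closest to expiry sort first.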
+type leasesByExpiry []*Lease + +func (le leasesByExpiry) Len() int { return len(le) } +func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() } +func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] } diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/lease_queue.go b/vendor/go.etcd.io/etcd/server/v3/lease/lease_queue.go index ffb7285ec081d..74ffee5c6963e 100644 --- a/vendor/go.etcd.io/etcd/server/v3/lease/lease_queue.go +++ b/vendor/go.etcd.io/etcd/server/v3/lease/lease_queue.go @@ -42,14 +42,14 @@ func (pq LeaseQueue) Swap(i, j int) { pq[j].index = j } -func (pq *LeaseQueue) Push(x interface{}) { +func (pq *LeaseQueue) Push(x any) { n := len(*pq) item := x.(*LeaseWithTime) item.index = n *pq = append(*pq, item) } -func (pq *LeaseQueue) Pop() interface{} { +func (pq *LeaseQueue) Pop() any { old := *pq n := len(old) item := old[n-1] @@ -96,7 +96,7 @@ func (mq *LeaseExpiredNotifier) Unregister() *LeaseWithTime { return item } -func (mq *LeaseExpiredNotifier) Poll() *LeaseWithTime { +func (mq *LeaseExpiredNotifier) Peek() *LeaseWithTime { if mq.Len() == 0 { return nil } diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go b/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go index 9c815cbaeced6..d5572c3aafc7d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go +++ b/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go @@ -19,7 +19,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net/http" "time" @@ -47,13 +47,13 @@ type leaseHandler struct { } func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { + if r.Method != http.MethodPost { http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) return } defer r.Body.Close() - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if err != nil { http.Error(w, "error reading body", http.StatusBadRequest) return @@ -75,7 +75,7 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } ttl, rerr := h.l.Renew(lease.LeaseID(lreq.ID)) if rerr != nil { - if rerr == lease.ErrLeaseNotFound { + if errors.Is(rerr, lease.ErrLeaseNotFound) { http.Error(w, rerr.Error(), http.StatusNotFound) return } @@ -167,7 +167,7 @@ func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundT return http.ErrUseLastResponse }, } - req, err := http.NewRequest("POST", url, bytes.NewReader(lreq)) + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(lreq)) if err != nil { return -1, err } @@ -192,12 +192,12 @@ func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundT } if resp.StatusCode != http.StatusOK { - return -1, fmt.Errorf("lease: unknown error(%s)", string(b)) + return -1, fmt.Errorf("lease: unknown error(%s)", b) } lresp := &pb.LeaseKeepAliveResponse{} if err := lresp.Unmarshal(b); err != nil { - return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b)) + return -1, fmt.Errorf(`lease: %w. 
data = "%s"`, err, b) } if lresp.ID != int64(id) { return -1, fmt.Errorf("lease: renew id mismatch") @@ -218,7 +218,7 @@ func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string return nil, err } - req, err := http.NewRequest("POST", url, bytes.NewReader(lreq)) + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(lreq)) if err != nil { return nil, err } @@ -254,16 +254,16 @@ func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string lresp := &leasepb.LeaseInternalResponse{} if err := lresp.Unmarshal(b); err != nil { - return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b)) + return nil, fmt.Errorf(`lease: %w. data = "%s"`, err, string(b)) } if lresp.LeaseTimeToLiveResponse.ID != int64(id) { - return nil, fmt.Errorf("lease: renew id mismatch") + return nil, fmt.Errorf("lease: TTL id mismatch") } return lresp, nil } func readResponse(resp *http.Response) (b []byte, err error) { - b, err = ioutil.ReadAll(resp.Body) + b, err = io.ReadAll(resp.Body) httputil.GracefulClose(resp) return } diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.pb.go b/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.pb.go index 8a1c54922fc07..ee833d31a7309 100644 --- a/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.pb.go +++ b/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.pb.go @@ -156,7 +156,7 @@ func init() { func init() { proto.RegisterFile("lease.proto", fileDescriptor_3dd57e402472b33a) } var fileDescriptor_3dd57e402472b33a = []byte{ - // 256 bytes of a gzipped FileDescriptorProto + // 283 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x3e, 0xb5, 0x24, 0x39, 0x45, @@ -170,9 +170,11 @@ var fileDescriptor_3dd57e402472b33a = []byte{ 0x54, 0xa8, 0x0c, 0xd8, 0x46, 0x6e, 0x23, 0x15, 0x3d, 0x64, 0xf7, 0xe9, 0x61, 0x57, 0x1b, 0x84, 0xc3, 0x0c, 0xa5, 0x0a, 0x2e, 0x51, 0x34, 0x5b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe2, 0xb9, 0xc4, 0x31, 0xb4, 0x40, 0xa4, 0xa0, 0xf6, 0xaa, 0x12, 0xb0, 0x17, 0xa2, 0x38, 0x08, 0x97, - 0x29, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe2, 0x91, 0x1c, 0xe3, - 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0x0e, - 0x5f, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x8a, 0x94, 0xb9, 0xae, 0x01, 0x00, 0x00, + 0x29, 0x4e, 0x9e, 0x27, 0x1e, 0xca, 0x31, 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe2, 0x91, 0x1c, 0xe3, + 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x10, 0xa5, 0x9f, 0x9e, + 0x0f, 0x36, 0x5b, 0x2f, 0x33, 0x1f, 0x1c, 0xf6, 0xfa, 0x10, 0x4b, 0xf4, 0xcb, 0x8c, 0xf5, 0xc1, + 0x51, 0xa6, 0x0f, 0x8d, 0x38, 0x6b, 0x28, 0x9d, 0xc4, 0x06, 0x8e, 0x10, 0x63, 0x40, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x0e, 0x16, 0x3b, 0xc4, 0xdf, 0x01, 0x00, 0x00, } func (m *Lease) Marshal() (dAtA []byte, err error) { diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.proto b/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.proto index 5b40e3b17b676..d63179754835d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.proto +++ b/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.proto @@ -4,6 +4,8 @@ package leasepb; import "gogoproto/gogo.proto"; import "etcd/api/etcdserverpb/rpc.proto"; +option go_package = 
"go.etcd.io/etcd/server/v3/lease/leasepb;leasepb"; + option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go b/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go index c21a3e2be883e..cf2028933d582 100644 --- a/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go +++ b/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go @@ -17,20 +17,20 @@ package lease import ( "container/heap" "context" - "encoding/binary" "errors" - "fmt" "math" "sort" "sync" "time" "github.com/coreos/go-semver/semver" + "go.uber.org/zap" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/server/v3/lease/leasepb" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" - "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" ) // NoLease is a special LeaseID representing the absence of a lease. @@ -39,8 +39,6 @@ const NoLease = LeaseID(0) // MaxLeaseTTL is the maximum lease TTL value const MaxLeaseTTL = 9000000000 -var v3_6 = semver.Version{Major: 3, Minor: 6} - var ( forever = time.Time{} @@ -333,6 +331,7 @@ func (le *lessor) Revoke(id LeaseID) error { le.mu.Unlock() return ErrLeaseNotFound } + defer close(l.revokec) // unlock before doing external work le.mu.Unlock() @@ -357,7 +356,7 @@ func (le *lessor) Revoke(id LeaseID) error { // lease deletion needs to be in the same backend transaction with the // kv deletion. Or we might end up with not executing the revoke or not // deleting the keys if etcdserver fails in between. - le.b.BatchTx().UnsafeDelete(buckets.Lease, int64ToBytes(int64(l.ID))) + schema.UnsafeDeleteLease(le.b.BatchTx(), &leasepb.Lease{ID: int64(l.ID)}) txn.End() @@ -385,11 +384,11 @@ func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error { func (le *lessor) shouldPersistCheckpoints() bool { cv := le.cluster.Version() - return le.checkpointPersist || (cv != nil && greaterOrEqual(*cv, v3_6)) + return le.checkpointPersist || (cv != nil && greaterOrEqual(*cv, version.V3_6)) } func greaterOrEqual(first, second semver.Version) bool { - return !first.LessThan(second) + return !version.LessThan(first, second) } // Renew renews an existing lease. If the given lease does not exist or @@ -524,12 +523,6 @@ func (le *lessor) Promote(extend time.Duration) { } } -type leasesByExpiry []*Lease - -func (le leasesByExpiry) Len() int { return len(le) } -func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() } -func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] } - func (le *lessor) Demote() { le.mu.Lock() defer le.mu.Unlock() @@ -619,12 +612,15 @@ func (le *lessor) Stop() { func (le *lessor) runLoop() { defer close(le.doneC) + delayTicker := time.NewTicker(500 * time.Millisecond) + defer delayTicker.Stop() + for { le.revokeExpiredLeases() le.checkpointScheduledLeases() select { - case <-time.After(500 * time.Millisecond): + case <-delayTicker.C: case <-le.stopC: return } @@ -664,6 +660,7 @@ func (le *lessor) checkpointScheduledLeases() { // rate limit for i := 0; i < leaseCheckpointRate/2; i++ { var cps []*pb.LeaseCheckpoint + le.mu.Lock() if le.isPrimary() { cps = le.findDueScheduledCheckpoints(maxLeaseCheckpointBatchSize) @@ -689,33 +686,33 @@ func (le *lessor) clearLeaseExpiredNotifier() { le.leaseExpiredNotifier = newLeaseExpiredNotifier() } -// expireExists returns true if expiry items exist. 
+// expireExists returns a non-nil lease "l" when an expiry item exists. // It pops an item only when an expiry item exists. // "next" is true to indicate that an item may exist on the next attempt. -func (le *lessor) expireExists() (l *Lease, ok bool, next bool) { +func (le *lessor) expireExists() (l *Lease, next bool) { if le.leaseExpiredNotifier.Len() == 0 { - return nil, false, false + return nil, false } - item := le.leaseExpiredNotifier.Poll() + item := le.leaseExpiredNotifier.Peek() l = le.leaseMap[item.id] if l == nil { // lease has expired or been revoked // no need to revoke (nothing has expired) le.leaseExpiredNotifier.Unregister() // O(log N) - return nil, false, true + return nil, true } now := time.Now() if now.Before(item.time) /* item.time: expiration time */ { // Candidate expirations are caught up, reinsert this item // and no need to revoke (nothing has expired) - return l, false, false + return nil, false } // recheck if revoke is complete after retry interval item.time = now.Add(le.expiredLeaseRetryInterval) le.leaseExpiredNotifier.RegisterOrUpdate(item) - return l, true, false + return l, false } // findExpiredLeases loops leases in the leaseMap until reaching expired limit @@ -724,13 +721,10 @@ func (le *lessor) findExpiredLeases(limit int) []*Lease { leases := make([]*Lease, 0, 16) for { - l, ok, next := le.expireExists() - if !ok && !next { + l, next := le.expireExists() + if l == nil && !next { break } - if !ok { - continue - } if next { continue } @@ -753,7 +747,7 @@ func (le *lessor) scheduleCheckpointIfNeeded(lease *Lease) { return } - if lease.RemainingTTL() > int64(le.checkpointInterval.Seconds()) { + if lease.getRemainingTTL() > int64(le.checkpointInterval.Seconds()) { if le.lg != nil { le.lg.Debug("Scheduling lease checkpoint", zap.Int64("leaseID", int64(lease.ID)), @@ -773,7 +767,7 @@ func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCh } now := time.Now() - cps := []*pb.LeaseCheckpoint{} + var cps []*pb.LeaseCheckpoint for le.leaseCheckpointHeap.Len() > 0 && len(cps) < checkpointLimit { lt := le.leaseCheckpointHeap[0] if lt.time.After(now) /* lt.time: next checkpoint time */ { @@ -805,10 +799,10 @@ func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCh func (le *lessor) initAndRecover() { tx := le.b.BatchTx() - tx.LockOutsideApply() - tx.UnsafeCreateBucket(buckets.Lease) - lpbs := unsafeGetAllLeases(tx) + tx.LockOutsideApply() + schema.UnsafeCreateLeaseBucket(tx) + lpbs := schema.MustUnsafeGetAllLeases(tx) tx.Unlock() for _, lpb := range lpbs { ID := LeaseID(lpb.ID) @@ -832,155 +826,20 @@ func (le *lessor) initAndRecover() { le.b.ForceCommit() } -type Lease struct { - ID LeaseID - ttl int64 // time to live of the lease in seconds - remainingTTL int64 // remaining time to live in seconds, if zero valued it is considered unset and the full ttl should be used - // expiryMu protects concurrent accesses to expiry - expiryMu sync.RWMutex - // expiry is time when lease should expire.
no expiration when expiry.IsZero() is true - expiry time.Time - - // mu protects concurrent accesses to itemSet - mu sync.RWMutex - itemSet map[LeaseItem]struct{} - revokec chan struct{} -} - -func NewLease(id LeaseID, ttl int64) *Lease { - return &Lease{ - ID: id, - ttl: ttl, - itemSet: make(map[LeaseItem]struct{}), - revokec: make(chan struct{}), - } -} - -func (l *Lease) expired() bool { - return l.Remaining() <= 0 -} - -func (l *Lease) persistTo(b backend.Backend) { - key := int64ToBytes(int64(l.ID)) - - lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL} - val, err := lpb.Marshal() - if err != nil { - panic("failed to marshal lease proto item") - } - - b.BatchTx().LockInsideApply() - b.BatchTx().UnsafePut(buckets.Lease, key, val) - b.BatchTx().Unlock() -} - -// TTL returns the TTL of the Lease. -func (l *Lease) TTL() int64 { - return l.ttl -} - -// SetLeaseItem sets the given lease item, this func is thread-safe -func (l *Lease) SetLeaseItem(item LeaseItem) { - l.mu.Lock() - defer l.mu.Unlock() - l.itemSet[item] = struct{}{} -} - -// RemainingTTL returns the last checkpointed remaining TTL of the lease. -// TODO(jpbetz): do not expose this utility method -func (l *Lease) RemainingTTL() int64 { - if l.remainingTTL > 0 { - return l.remainingTTL - } - return l.ttl -} - -// refresh refreshes the expiry of the lease. -func (l *Lease) refresh(extend time.Duration) { - newExpiry := time.Now().Add(extend + time.Duration(l.RemainingTTL())*time.Second) - l.expiryMu.Lock() - defer l.expiryMu.Unlock() - l.expiry = newExpiry -} - -// forever sets the expiry of lease to be forever. -func (l *Lease) forever() { - l.expiryMu.Lock() - defer l.expiryMu.Unlock() - l.expiry = forever -} - -// Demoted returns true if the lease's expiry has been reset to forever. -func (l *Lease) Demoted() bool { - l.expiryMu.Lock() - defer l.expiryMu.Unlock() - return l.expiry == forever -} - -// Keys returns all the keys attached to the lease. -func (l *Lease) Keys() []string { - l.mu.RLock() - keys := make([]string, 0, len(l.itemSet)) - for k := range l.itemSet { - keys = append(keys, k.Key) - } - l.mu.RUnlock() - return keys -} - -// Remaining returns the remaining time of the lease. -func (l *Lease) Remaining() time.Duration { - l.expiryMu.RLock() - defer l.expiryMu.RUnlock() - if l.expiry.IsZero() { - return time.Duration(math.MaxInt64) - } - return time.Until(l.expiry) -} - -type LeaseItem struct { - Key string -} - -func int64ToBytes(n int64) []byte { - bytes := make([]byte, 8) - binary.BigEndian.PutUint64(bytes, uint64(n)) - return bytes -} - -func bytesToLeaseID(bytes []byte) int64 { - if len(bytes) != 8 { - panic(fmt.Errorf("lease ID must be 8-byte")) - } - return int64(binary.BigEndian.Uint64(bytes)) -} - -func unsafeGetAllLeases(tx backend.ReadTx) []*leasepb.Lease { - ls := make([]*leasepb.Lease, 0) - err := tx.UnsafeForEach(buckets.Lease, func(k, v []byte) error { - var lpb leasepb.Lease - err := lpb.Unmarshal(v) - if err != nil { - return fmt.Errorf("failed to Unmarshal lease proto item; lease ID=%016x", bytesToLeaseID(k)) - } - ls = append(ls, &lpb) - return nil - }) - if err != nil { - panic(err) - } - return ls -} - // FakeLessor is a fake implementation of Lessor interface. // Used for testing only. 
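+// Grant records IDs in LeaseSet so that Lookup can report them as existing.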
-type FakeLessor struct{} +type FakeLessor struct { + LeaseSet map[LeaseID]struct{} +} func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {} func (fl *FakeLessor) SetCheckpointer(cp Checkpointer) {} -func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil } +func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { + fl.LeaseSet[id] = struct{}{} + return nil, nil +} func (fl *FakeLessor) Revoke(id LeaseID) error { return nil } @@ -997,7 +856,12 @@ func (fl *FakeLessor) Demote() {} func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil } -func (fl *FakeLessor) Lookup(id LeaseID) *Lease { return nil } +func (fl *FakeLessor) Lookup(id LeaseID) *Lease { + if _, ok := fl.LeaseSet[id]; ok { + return &Lease{ID: id} + } + return nil +} func (fl *FakeLessor) Leases() []*Lease { return nil } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/revision.go b/vendor/go.etcd.io/etcd/server/v3/mvcc/revision.go deleted file mode 100644 index d6213866f26e1..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/revision.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import "encoding/binary" - -// revBytesLen is the byte length of a normal revision. -// First 8 bytes is the revision.main in big-endian format. The 9th byte -// is a '_'. The last 8 bytes is the revision.sub in big-endian format. -const revBytesLen = 8 + 1 + 8 - -// A revision indicates modification of the key-value space. -// The set of changes that share same main revision changes the key-value space atomically. -type revision struct { - // main is the main revision of a set of changes that happen atomically. - main int64 - - // sub is the sub revision of a change in a set of changes that happen - // atomically. Each change has different increasing sub revision in that - // set. 
- sub int64 -} - -func (a revision) GreaterThan(b revision) bool { - if a.main > b.main { - return true - } - if a.main < b.main { - return false - } - return a.sub > b.sub -} - -func newRevBytes() []byte { - return make([]byte, revBytesLen, markedRevBytesLen) -} - -func revToBytes(rev revision, bytes []byte) { - binary.BigEndian.PutUint64(bytes, uint64(rev.main)) - bytes[8] = '_' - binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub)) -} - -func bytesToRev(bytes []byte) revision { - return revision{ - main: int64(binary.BigEndian.Uint64(bytes[0:8])), - sub: int64(binary.BigEndian.Uint64(bytes[9:])), - } -} - -type revisions []revision - -func (a revisions) Len() int { return len(a) } -func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) } -func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/util.go b/vendor/go.etcd.io/etcd/server/v3/mvcc/util.go deleted file mode 100644 index ea3111267b895..0000000000000 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/util.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "fmt" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" -) - -func WriteKV(be backend.Backend, kv mvccpb.KeyValue) { - ibytes := newRevBytes() - revToBytes(revision{main: kv.ModRevision}, ibytes) - - d, err := kv.Marshal() - if err != nil { - panic(fmt.Errorf("cannot marshal event: %v", err)) - } - - be.BatchTx().LockOutsideApply() - be.BatchTx().UnsafePut(buckets.Key, ibytes, d) - be.BatchTx().Unlock() -} - -func UnsafeSetScheduledCompact(tx backend.BatchTx, value int64) { - rbytes := newRevBytes() - revToBytes(revision{main: value}, rbytes) - tx.UnsafePut(buckets.Meta, scheduledCompactKeyName, rbytes) -} - -func UnsafeReadFinishedCompact(tx backend.ReadTx) (int64, bool) { - _, finishedCompactBytes := tx.UnsafeRange(buckets.Meta, finishedCompactKeyName, nil, 0) - if len(finishedCompactBytes) != 0 { - return bytesToRev(finishedCompactBytes[0]).main, true - } - return 0, false -} - -func UnsafeReadScheduledCompact(tx backend.ReadTx) (int64, bool) { - _, scheduledCompactBytes := tx.UnsafeRange(buckets.Meta, scheduledCompactKeyName, nil, 0) - if len(scheduledCompactBytes) != 0 { - return bytesToRev(scheduledCompactBytes[0]).main, true - } - return 0, false -} diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/auth_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/auth_client_adapter.go index 140212b96202f..0baa10c5f16d3 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/auth_client_adapter.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/auth_client_adapter.go @@ -17,9 +17,9 @@ package adapter import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - grpc "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) type as2ac struct{ as 
pb.AuthServer } diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/chan_stream.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/chan_stream.go index 1af514b1fdd3a..eaf4309c9783e 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/chan_stream.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/chan_stream.go @@ -96,15 +96,15 @@ func (cs *chanClientStream) CloseSend() error { // chanStream implements grpc.Stream using channels type chanStream struct { - recvc <-chan interface{} - sendc chan<- interface{} + recvc <-chan any + sendc chan<- any ctx context.Context cancel context.CancelFunc } func (s *chanStream) Context() context.Context { return s.ctx } -func (s *chanStream) SendMsg(m interface{}) error { +func (s *chanStream) SendMsg(m any) error { select { case s.sendc <- m: if err, ok := m.(error); ok { @@ -116,8 +116,8 @@ func (s *chanStream) SendMsg(m interface{}) error { return s.ctx.Err() } -func (s *chanStream) RecvMsg(m interface{}) error { - v := m.(*interface{}) +func (s *chanStream) RecvMsg(m any) error { + v := m.(*any) for { select { case msg, ok := <-s.recvc: @@ -141,7 +141,7 @@ func (s *chanStream) RecvMsg(m interface{}) error { func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream { // ch1 is buffered so server can send error on close - ch1, ch2 := make(chan interface{}, 1), make(chan interface{}) + ch1, ch2 := make(chan any, 1), make(chan any) headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1) cctx, ccancel := context.WithCancel(ctx) diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/cluster_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/cluster_client_adapter.go index c1fff054de42a..4c9fbbb082665 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/cluster_client_adapter.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/cluster_client_adapter.go @@ -17,9 +17,9 @@ package adapter import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) type cls2clc struct{ cls pb.ClusterServer } diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/election_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/election_client_adapter.go index 81d7434474ac0..b1266318f9ad8 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/election_client_adapter.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/election_client_adapter.go @@ -17,9 +17,9 @@ package adapter import ( "context" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" - "google.golang.org/grpc" + + "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" ) type es2ec struct{ es v3electionpb.ElectionServer } @@ -61,7 +61,7 @@ func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error { return s.SendMsg(rr) } func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) { - var v interface{} + var v any if err := s.RecvMsg(&v); err != nil { return nil, err } @@ -72,7 +72,7 @@ func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error { return s.SendMsg(rr) } func (s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) { - var v interface{} + var v any if err := s.RecvMsg(&v); err != nil { return nil, err } diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/kv_client_adapter.go 
b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/kv_client_adapter.go index ddb6ada473274..69e3a113c4fe7 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/kv_client_adapter.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/kv_client_adapter.go @@ -17,9 +17,9 @@ package adapter import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - grpc "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) type kvs2kvc struct{ kvs pb.KVServer } diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lease_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lease_client_adapter.go index 6640d1d39e353..4369c3052f5c5 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lease_client_adapter.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lease_client_adapter.go @@ -17,9 +17,9 @@ package adapter import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) type ls2lc struct { @@ -63,7 +63,7 @@ func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error { return s.SendMsg(rr) } func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) { - var v interface{} + var v any if err := s.RecvMsg(&v); err != nil { return nil, err } @@ -74,7 +74,7 @@ func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error { return s.SendMsg(rr) } func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) { - var v interface{} + var v any if err := s.RecvMsg(&v); err != nil { return nil, err } diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lock_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lock_client_adapter.go index a3ceaf26dae25..de5ba049e1970 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lock_client_adapter.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lock_client_adapter.go @@ -17,9 +17,9 @@ package adapter import ( "context" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" - "google.golang.org/grpc" + + "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" ) type ls2lsc struct{ ls v3lockpb.LockServer } diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/maintenance_client_adapter.go index 6369a16d8b40d..d3bb805a1f60d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/maintenance_client_adapter.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/maintenance_client_adapter.go @@ -17,9 +17,9 @@ package adapter import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) type mts2mtc struct{ mts pb.MaintenanceServer } @@ -73,7 +73,7 @@ func (s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error { return s.SendMsg(rr) } func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) { - var v interface{} + var v any if err := s.RecvMsg(&v); err != nil { return nil, err } @@ -84,7 +84,7 @@ func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error { return s.SendMsg(rr) } func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) { - var v interface{} + var v any if err := s.RecvMsg(&v); err != nil { return nil, err } diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/watch_client_adapter.go 
b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/watch_client_adapter.go index fbc09f6ff641c..43f2ab7677d10 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/watch_client_adapter.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/watch_client_adapter.go @@ -18,8 +18,9 @@ import ( "context" "errors" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" "google.golang.org/grpc" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) var errAlreadySentHeader = errors.New("adapter: already sent header") @@ -47,7 +48,7 @@ func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error { return s.SendMsg(wr) } func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { - var v interface{} + var v any if err := s.RecvMsg(&v); err != nil { return nil, err } @@ -58,7 +59,7 @@ func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error { return s.SendMsg(wr) } func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) { - var v interface{} + var v any if err := s.RecvMsg(&v); err != nil { return nil, err } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/backend.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend.go similarity index 70% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/backend.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend.go index 2beef576346e5..b7b0d6861ad5e 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/backend.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend.go @@ -12,24 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -package etcdserver +package storage import ( "fmt" "os" "time" - "go.etcd.io/etcd/raft/v3/raftpb" + "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/mvcc/backend" - - "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" + "go.etcd.io/raft/v3/raftpb" ) func newBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { - bcfg := backend.DefaultBackendConfig() + bcfg := backend.DefaultBackendConfig(cfg.Logger) bcfg.Path = cfg.BackendPath() bcfg.UnsafeNoFsync = cfg.UnsafeNoFsync if cfg.BackendBatchLimit != 0 { @@ -55,20 +55,20 @@ func newBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { return backend.New(bcfg) } -// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. -func openSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot, hooks backend.Hooks) (backend.Backend, error) { +// OpenSnapshotBackend renames a snapshot db to the current etcd db and opens it. +func OpenSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot, hooks *BackendHooks) (backend.Backend, error) { snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) if err != nil { - return nil, fmt.Errorf("failed to find database snapshot file (%v)", err) + return nil, fmt.Errorf("failed to find database snapshot file (%w)", err) } if err := os.Rename(snapPath, cfg.BackendPath()); err != nil { - return nil, fmt.Errorf("failed to rename database snapshot file (%v)", err) + return nil, fmt.Errorf("failed to rename database snapshot file (%w)", err) } - return openBackend(cfg, hooks), nil + return OpenBackend(cfg, hooks), nil } -// openBackend returns a backend using the current etcd db. 
-func openBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { +// OpenBackend returns a backend using the current etcd db. +func OpenBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { fn := cfg.BackendPath() now, beOpened := time.Now(), make(chan backend.Backend) @@ -76,9 +76,12 @@ func openBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { beOpened <- newBackend(cfg, hooks) }() + defer func() { + cfg.Logger.Info("opened backend db", zap.String("path", fn), zap.Duration("took", time.Since(now))) + }() + select { case be := <-beOpened: - cfg.Logger.Info("opened backend db", zap.String("path", fn), zap.Duration("took", time.Since(now))) return be case <-time.After(10 * time.Second): @@ -92,18 +95,20 @@ func openBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { return <-beOpened } -// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes +// RecoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes // before updating the backend db after persisting raft snapshot to disk, // violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this // case, replace the db with the snapshot db sent by the leader. -func recoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks backend.Hooks) (backend.Backend, error) { +func RecoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks *BackendHooks) (backend.Backend, error) { consistentIndex := uint64(0) if beExist { - consistentIndex, _ = cindex.ReadConsistentIndex(oldbe.ReadTx()) + consistentIndex, _ = schema.ReadConsistentIndex(oldbe.ReadTx()) } if snapshot.Metadata.Index <= consistentIndex { + cfg.Logger.Info("Skipping snapshot backend", zap.Uint64("consistent-index", consistentIndex), zap.Uint64("snapshot-index", snapshot.Metadata.Index)) return oldbe, nil } + cfg.Logger.Info("Recovering from snapshot backend", zap.Uint64("consistent-index", consistentIndex), zap.Uint64("snapshot-index", snapshot.Metadata.Index)) oldbe.Close() - return openSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot, hooks) + return OpenSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot, hooks) } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/backend.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/backend.go similarity index 92% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/backend.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/backend.go index 7d77da12fd6d7..9042208db6d00 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/backend.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/backend.go @@ -18,7 +18,6 @@ import ( "fmt" "hash/crc32" "io" - "io/ioutil" "os" "path/filepath" "sync" @@ -26,8 +25,9 @@ import ( "time" humanize "github.com/dustin/go-humanize" - bolt "go.etcd.io/bbolt" "go.uber.org/zap" + + bolt "go.etcd.io/bbolt" ) var ( @@ -153,11 +153,12 @@ type BackendConfig struct { type BackendConfigOption func(*BackendConfig) -func DefaultBackendConfig() BackendConfig { +func DefaultBackendConfig(lg *zap.Logger) BackendConfig { return BackendConfig{ BatchInterval: defaultBatchInterval, BatchLimit: defaultBatchLimit, MmapSize: InitialMmapSize, + Logger: lg, } } @@ -171,8 +172,8 @@ func WithMmapSize(size uint64) BackendConfigOption { } } -func NewDefaultBackend(path string, opts ...BackendConfigOption) Backend { - bcfg := 
DefaultBackendConfig() +func NewDefaultBackend(lg *zap.Logger, path string, opts ...BackendConfigOption) Backend { + bcfg := DefaultBackendConfig(lg) bcfg.Path = path for _, opt := range opts { opt(&bcfg) @@ -182,10 +183,6 @@ func NewDefaultBackend(path string, opts ...BackendConfigOption) Backend { } func newBackend(bcfg BackendConfig) *backend { - if bcfg.Logger == nil { - bcfg.Logger = zap.NewNop() - } - bopts := &bolt.Options{} if boltOpenOptions != nil { *bopts = *boltOpenOptions @@ -195,6 +192,7 @@ func newBackend(bcfg BackendConfig) *backend { bopts.NoSync = bcfg.UnsafeNoFsync bopts.NoGrowSync = bcfg.UnsafeNoFsync bopts.Mlock = bcfg.Mlock + bopts.Logger = newBoltLoggerZap(bcfg) db, err := bolt.Open(bcfg.Path, 0600, bopts) if err != nil { @@ -396,7 +394,7 @@ func (b *backend) Hash(ignores func(bucketName, keyName []byte) bool) (uint32, e for next, _ := c.First(); next != nil; next, _ = c.Next() { b := tx.Bucket(next) if b == nil { - return fmt.Errorf("cannot get hash of bucket %s", string(next)) + return fmt.Errorf("cannot get hash of bucket %s", next) } h.Write(next) b.ForEach(func(k, v []byte) error { @@ -446,6 +444,8 @@ func (b *backend) run() { func (b *backend) Close() error { close(b.stopc) <-b.donec + b.mu.Lock() + defer b.mu.Unlock() return b.db.Close() } @@ -477,22 +477,21 @@ func (b *backend) defrag() error { b.readTx.Lock() defer b.readTx.Unlock() - b.batchTx.unsafeCommit(true) - - b.batchTx.tx = nil - // Create a temporary file to ensure we start with a clean slate. // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup. dir := filepath.Dir(b.db.Path()) - temp, err := ioutil.TempFile(dir, "db.tmp.*") + temp, err := os.CreateTemp(dir, "db.tmp.*") if err != nil { return err } + options := bolt.Options{} if boltOpenOptions != nil { options = *boltOpenOptions } options.OpenFile = func(_ string, _ int, _ os.FileMode) (file *os.File, err error) { + // gofail: var defragOpenFileError string + // return nil, fmt.Errorf(defragOpenFileError) return temp, nil } // Don't load tmp db into memory regardless of opening options @@ -500,6 +499,15 @@ func (b *backend) defrag() error { tdbp := temp.Name() tmpdb, err := bolt.Open(tdbp, 0600, &options) if err != nil { + temp.Close() + if rmErr := os.Remove(temp.Name()); rmErr != nil && b.lg != nil { + b.lg.Error( + "failed to remove temporary file", + zap.String("path", temp.Name()), + zap.Error(rmErr), + ) + } + return err } @@ -515,6 +523,11 @@ func (b *backend) defrag() error { zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))), ) } + + // Commit/stop and then reset current transactions (including the readTx) + b.batchTx.unsafeCommit(true) + b.batchTx.tx = nil + // gofail: var defragBeforeCopy struct{} err = defragdb(b.db, tmpdb, defragLimit) if err != nil { @@ -522,6 +535,11 @@ func (b *backend) defrag() error { if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil { b.lg.Error("failed to remove db.tmp after defragmentation completed", zap.Error(rmErr)) } + + // restore the bbolt transactions if defragmentation fails + b.batchTx.tx = b.unsafeBegin(true) + b.readTx.tx = b.unsafeBegin(false) + return err } @@ -574,6 +592,9 @@ func (b *backend) defrag() error { } func defragdb(odb, tmpdb *bolt.DB, limit int) error { + // gofail: var defragdbFail string + // return fmt.Errorf(defragdbFail) + // open a tx on tmpdb for writes tmptx, err := tmpdb.Begin(true) if err != nil { @@ -598,7 +619,7 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error { for next, _ := c.First(); next != nil; next, _ = 
c.Next() { b := tx.Bucket(next) if b == nil { - return fmt.Errorf("backend: cannot defrag bucket %s", string(next)) + return fmt.Errorf("backend: cannot defrag bucket %s", next) } tmpb, berr := tmptx.CreateBucketIfNotExists(next) @@ -648,7 +669,9 @@ func (b *backend) begin(write bool) *bolt.Tx { } func (b *backend) unsafeBegin(write bool) *bolt.Tx { + // gofail: var beforeStartDBTxn struct{} tx, err := b.db.Begin(write) + // gofail: var afterStartDBTxn struct{} if err != nil { b.lg.Fatal("failed to begin tx", zap.Error(err)) } @@ -670,3 +693,20 @@ func (s *snapshot) Close() error { <-s.donec return s.Tx.Rollback() } + +func newBoltLoggerZap(bcfg BackendConfig) bolt.Logger { + lg := bcfg.Logger.Named("bbolt") + return &zapBoltLogger{lg.WithOptions(zap.AddCallerSkip(1)).Sugar()} +} + +type zapBoltLogger struct { + *zap.SugaredLogger +} + +func (zl *zapBoltLogger) Warning(args ...any) { + zl.SugaredLogger.Warn(args...) +} + +func (zl *zapBoltLogger) Warningf(format string, args ...any) { + zl.SugaredLogger.Warnf(format, args...) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/batch_tx.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/batch_tx.go similarity index 95% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/batch_tx.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/batch_tx.go index 2d5d3eaf4a146..5af557cb42837 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/batch_tx.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/batch_tx.go @@ -16,13 +16,16 @@ package backend import ( "bytes" + "errors" "math" "sync" "sync/atomic" "time" - bolt "go.etcd.io/bbolt" "go.uber.org/zap" + + bolt "go.etcd.io/bbolt" + bolterrors "go.etcd.io/bbolt/errors" ) type BucketID int @@ -43,18 +46,28 @@ type Bucket interface { } type BatchTx interface { - ReadTx - UnsafeCreateBucket(bucket Bucket) - UnsafeDeleteBucket(bucket Bucket) - UnsafePut(bucket Bucket, key []byte, value []byte) - UnsafeSeqPut(bucket Bucket, key []byte, value []byte) - UnsafeDelete(bucket Bucket, key []byte) + Lock() + Unlock() // Commit commits a previous tx and begins a new writable one. Commit() // CommitAndStop commits the previous tx and does not create a new one. CommitAndStop() LockInsideApply() LockOutsideApply() + UnsafeReadWriter +} + +type UnsafeReadWriter interface { + UnsafeReader + UnsafeWriter +} + +type UnsafeWriter interface { + UnsafeCreateBucket(bucket Bucket) + UnsafeDeleteBucket(bucket Bucket) + UnsafePut(bucket Bucket, key []byte, value []byte) + UnsafeSeqPut(bucket Bucket, key []byte, value []byte) + UnsafeDelete(bucket Bucket, key []byte) } type batchTx struct { @@ -100,21 +113,8 @@ func (t *batchTx) Unlock() { t.Mutex.Unlock() } -// BatchTx interface embeds ReadTx interface. But RLock() and RUnlock() do not -// have appropriate semantics in BatchTx interface. Therefore should not be called. 
-// TODO: might want to decouple ReadTx and BatchTx - -func (t *batchTx) RLock() { - panic("unexpected RLock") -} - -func (t *batchTx) RUnlock() { - panic("unexpected RUnlock") -} - func (t *batchTx) UnsafeCreateBucket(bucket Bucket) { - _, err := t.tx.CreateBucket(bucket.Name()) - if err != nil && err != bolt.ErrBucketExists { + if _, err := t.tx.CreateBucketIfNotExists(bucket.Name()); err != nil { t.backend.lg.Fatal( "failed to create a bucket", zap.Stringer("bucket-name", bucket), @@ -126,7 +126,7 @@ func (t *batchTx) UnsafeCreateBucket(bucket Bucket) { func (t *batchTx) UnsafeDeleteBucket(bucket Bucket) { err := t.tx.DeleteBucket(bucket.Name()) - if err != nil && err != bolt.ErrBucketNotFound { + if err != nil && !errors.Is(err, bolterrors.ErrBucketNotFound) { t.backend.lg.Fatal( "failed to delete a bucket", zap.Stringer("bucket-name", bucket), @@ -310,6 +310,7 @@ func (t *batchTxBuffered) Unlock() { t.backend.readTx.Lock() // blocks txReadBuffer for writing. // gofail: var beforeWritebackBuf struct{} t.buf.writeback(&t.backend.readTx.buf) + // gofail: var afterWritebackBuf struct{} t.backend.readTx.Unlock() // We commit the transaction when the number of pending operations // reaches the configured limit(batchLimit) to prevent it from @@ -359,8 +360,11 @@ func (t *batchTxBuffered) commit(stop bool) { func (t *batchTxBuffered) unsafeCommit(stop bool) { if t.backend.hooks != nil { + // gofail: var commitBeforePreCommitHook struct{} t.backend.hooks.OnPreCommitUnsafe(t) + // gofail: var commitAfterPreCommitHook struct{} } + if t.backend.readTx.tx != nil { // wait all store read transactions using the current boltdb tx to finish, // then close the boltdb tx diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_default.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_default.go similarity index 96% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_default.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/config_default.go index 847bd10fd78a4..fd57c7ca84cfe 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_default.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_default.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !linux && !windows -// +build !linux,!windows package backend diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_linux.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_linux.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_linux.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/config_linux.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_windows.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_windows.go similarity index 98% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_windows.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/config_windows.go index ba6e5a1284ccb..7bb42f3a289cf 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_windows.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_windows.go @@ -13,7 +13,6 @@ // limitations under the License. 
//go:build windows -// +build windows package backend diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/doc.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/doc.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/doc.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/hooks.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/hooks.go similarity index 88% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/hooks.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/hooks.go index 9750828ef7b33..817d0c5eb5086 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/hooks.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/hooks.go @@ -14,20 +14,20 @@ package backend -type HookFunc func(tx BatchTx) +type HookFunc func(tx UnsafeReadWriter) // Hooks allow to add additional logic executed during transaction lifetime. type Hooks interface { // OnPreCommitUnsafe is executed before Commit of transactions. // The given transaction is already locked. - OnPreCommitUnsafe(tx BatchTx) + OnPreCommitUnsafe(tx UnsafeReadWriter) } type hooks struct { onPreCommitUnsafe HookFunc } -func (h hooks) OnPreCommitUnsafe(tx BatchTx) { +func (h hooks) OnPreCommitUnsafe(tx UnsafeReadWriter) { h.onPreCommitUnsafe(tx) } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/metrics.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/metrics.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/metrics.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/read_tx.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/read_tx.go similarity index 98% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/read_tx.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/read_tx.go index 56327d52ae6fe..4ca2621411c5c 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/read_tx.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/read_tx.go @@ -26,11 +26,12 @@ import ( // is known to never overwrite any key so range is safe. type ReadTx interface { - Lock() - Unlock() RLock() RUnlock() + UnsafeReader +} +type UnsafeReader interface { UnsafeRange(bucket Bucket, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/tx_buffer.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/tx_buffer.go similarity index 83% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/tx_buffer.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/tx_buffer.go index 667400248363c..821b300bfeff9 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/tx_buffer.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/tx_buffer.go @@ -16,7 +16,11 @@ package backend import ( "bytes" + "encoding/hex" + "fmt" "sort" + + "go.etcd.io/etcd/client/pkg/v3/verify" ) const bucketBufferInitialSize = 512 @@ -50,7 +54,20 @@ func (txw *txWriteBuffer) put(bucket Bucket, k, v []byte) { } func (txw *txWriteBuffer) putSeq(bucket Bucket, k, v []byte) { - // TODO: Add (in tests?) verification whether k>b[len(b)] + // putSeq is only be called for the data in the Key bucket. The keys + // in the Key bucket should be monotonically increasing revisions. 
+ verify.Verify(func() { + b, ok := txw.buckets[bucket.ID()] + if !ok || b.used == 0 { + return + } + + existingMaxKey := b.buf[b.used-1].key + if bytes.Compare(k, existingMaxKey) <= 0 { + panic(fmt.Sprintf("Broke the rule of monotonically increasing, existingMaxKey: %s, currentKey: %s", + hex.EncodeToString(existingMaxKey), hex.EncodeToString(k))) + } + }) txw.putInternal(bucket, k, v) } @@ -80,6 +97,9 @@ func (txw *txWriteBuffer) writeback(txr *txReadBuffer) { rb, ok := txr.buckets[k] if !ok { delete(txw.buckets, k) + if seq, ok := txw.bucket2seq[k]; ok && !seq { + wb.dedupe() + } txr.buckets[k] = wb continue } @@ -124,7 +144,7 @@ func (txr *txReadBuffer) unsafeCopy() txReadBuffer { bufVersion: 0, } for bucketName, bucket := range txr.txBuffer.buckets { - txrCopy.txBuffer.buckets[bucketName] = bucket.Copy() + txrCopy.txBuffer.buckets[bucketName] = bucket.CopyUsed() } return txrCopy } @@ -148,7 +168,7 @@ func newBucketBuffer() *bucketBuffer { func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) { f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 } idx := sort.Search(bb.used, f) - if idx < 0 { + if idx < 0 || idx >= bb.used { return nil, nil } if len(endKey) == 0 { @@ -201,10 +221,15 @@ func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) { if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 { return } + bb.dedupe() +} +// dedupe removes duplicates, using only newest update +func (bb *bucketBuffer) dedupe() { + if bb.used <= 1 { + return + } sort.Stable(bb) - - // remove duplicates, using only newest update widx := 0 for ridx := 1; ridx < bb.used; ridx++ { if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) { @@ -221,11 +246,13 @@ func (bb *bucketBuffer) Less(i, j int) bool { } func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] } -func (bb *bucketBuffer) Copy() *bucketBuffer { +func (bb *bucketBuffer) CopyUsed() *bucketBuffer { + verify.Assert(bb.used <= len(bb.buf), + "used (%d) should never be bigger than the length of buf (%d)", bb.used, len(bb.buf)) bbCopy := bucketBuffer{ - buf: make([]kv, len(bb.buf)), + buf: make([]kv, bb.used), used: bb.used, } - copy(bbCopy.buf, bb.buf) + copy(bbCopy.buf, bb.buf[:bb.used]) return &bbCopy } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/verify.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/verify.go similarity index 52% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/verify.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/verify.go index a6a0b8675ec52..a3aa165fe84f1 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/verify.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/verify.go @@ -15,17 +15,18 @@ package backend import ( - "os" + "fmt" "runtime/debug" "strings" + "github.com/google/go-cmp/cmp" "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/verify" ) const ( - ENV_VERIFY = "ETCD_VERIFY" - ENV_VERIFY_ALL_VALUE = "all" - ENV_VERIFY_LOCK = "lock" + EnvVerifyValueLock verify.VerificationType = "lock" ) func ValidateCalledInsideApply(lg *zap.Logger) { @@ -56,7 +57,7 @@ func ValidateCalledInsideUnittest(lg *zap.Logger) { } func verifyLockEnabled() bool { - return os.Getenv(ENV_VERIFY) == ENV_VERIFY_ALL_VALUE || os.Getenv(ENV_VERIFY) == ENV_VERIFY_LOCK + return verify.IsVerificationEnabled(EnvVerifyValueLock) } func insideApply() bool { @@ -68,3 +69,41 @@ func insideUnittest() bool { stackTraceStr := string(debug.Stack()) return strings.Contains(stackTraceStr, "_test.go") && 
!strings.Contains(stackTraceStr, "tests/") } + +// VerifyBackendConsistency verifies data in ReadTx and BatchTx are consistent. +func VerifyBackendConsistency(b Backend, lg *zap.Logger, skipSafeRangeBucket bool, bucket ...Bucket) { + verify.Verify(func() { + if b == nil { + return + } + if lg != nil { + lg.Debug("verifyBackendConsistency", zap.Bool("skipSafeRangeBucket", skipSafeRangeBucket)) + } + b.BatchTx().LockOutsideApply() + defer b.BatchTx().Unlock() + b.ReadTx().RLock() + defer b.ReadTx().RUnlock() + for _, bkt := range bucket { + if skipSafeRangeBucket && bkt.IsSafeRangeBucket() { + continue + } + unsafeVerifyTxConsistency(b, bkt) + } + }) +} + +func unsafeVerifyTxConsistency(b Backend, bucket Bucket) { + dataFromWriteTxn := map[string]string{} + b.BatchTx().UnsafeForEach(bucket, func(k, v []byte) error { + dataFromWriteTxn[string(k)] = string(v) + return nil + }) + dataFromReadTxn := map[string]string{} + b.ReadTx().UnsafeForEach(bucket, func(k, v []byte) error { + dataFromReadTxn[string(k)] = string(v) + return nil + }) + if diff := cmp.Diff(dataFromWriteTxn, dataFromReadTxn); diff != "" { + panic(fmt.Sprintf("bucket %s data mismatch\nwrite TXN: %v\nread TXN: %v\ndiff: %s", bucket.String(), dataFromWriteTxn, dataFromReadTxn, diff)) + } +} diff --git a/vendor/go.etcd.io/etcd/server/v3/datadir/datadir.go b/vendor/go.etcd.io/etcd/server/v3/storage/datadir/datadir.go similarity index 84% rename from vendor/go.etcd.io/etcd/server/v3/datadir/datadir.go rename to vendor/go.etcd.io/etcd/server/v3/storage/datadir/datadir.go index fa4c51ad1a2f0..ced7d767fe548 100644 --- a/vendor/go.etcd.io/etcd/server/v3/datadir/datadir.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/datadir/datadir.go @@ -31,7 +31,16 @@ func ToSnapDir(dataDir string) string { return filepath.Join(ToMemberDir(dataDir), snapDirSegment) } +// ToWalDir returns the directory path for the member's WAL. +// +// Deprecated: use ToWALDir instead. +// +//revive:disable-next-line:var-naming func ToWalDir(dataDir string) string { + return ToWALDir(dataDir) +} + +func ToWALDir(dataDir string) string { return filepath.Join(ToMemberDir(dataDir), walDirSegment) } diff --git a/vendor/go.etcd.io/etcd/server/v3/datadir/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/datadir/doc.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/datadir/doc.go rename to vendor/go.etcd.io/etcd/server/v3/storage/datadir/doc.go diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/hooks.go b/vendor/go.etcd.io/etcd/server/v3/storage/hooks.go new file mode 100644 index 0000000000000..ffec71c0bca22 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/hooks.go @@ -0,0 +1,60 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "sync" + + "go.uber.org/zap" + + "go.etcd.io/etcd/server/v3/etcdserver/cindex" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" + "go.etcd.io/raft/v3/raftpb" +) + +type BackendHooks struct { + indexer cindex.ConsistentIndexer + lg *zap.Logger + + // confState to Be written in the next submitted Backend transaction (if dirty) + confState raftpb.ConfState + // first write changes it to 'dirty'. false by default, so + // not initialized `confState` is meaningless. + confStateDirty bool + confStateLock sync.Mutex +} + +func NewBackendHooks(lg *zap.Logger, indexer cindex.ConsistentIndexer) *BackendHooks { + return &BackendHooks{lg: lg, indexer: indexer} +} + +func (bh *BackendHooks) OnPreCommitUnsafe(tx backend.UnsafeReadWriter) { + bh.indexer.UnsafeSave(tx) + bh.confStateLock.Lock() + defer bh.confStateLock.Unlock() + if bh.confStateDirty { + schema.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState) + // save bh.confState + bh.confStateDirty = false + } +} + +func (bh *BackendHooks) SetConfState(confState *raftpb.ConfState) { + bh.confStateLock.Lock() + defer bh.confStateLock.Unlock() + bh.confState = *confState + bh.confStateDirty = true +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/metrics.go new file mode 100644 index 0000000000000..cb7f87057f91c --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/metrics.go @@ -0,0 +1,30 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +var quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "quota_backend_bytes", + Help: "Current backend storage quota size in bytes.", +}) + +func init() { + prometheus.MustRegister(quotaBackendBytes) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/doc.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/doc.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/doc.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/hash.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/hash.go similarity index 77% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/hash.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/hash.go index 8af5d90a46f64..31846f6116ee7 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/hash.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/hash.go @@ -20,18 +20,19 @@ import ( "sort" "sync" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" "go.uber.org/zap" + + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" ) const ( hashStorageMaxSize = 10 ) -func unsafeHashByRev(tx backend.ReadTx, compactRevision, revision int64, keep map[revision]struct{}) (KeyValueHash, error) { +func unsafeHashByRev(tx backend.UnsafeReader, compactRevision, revision int64, keep map[Revision]struct{}) (KeyValueHash, error) { h := newKVHasher(compactRevision, revision, keep) - err := tx.UnsafeForEach(buckets.Key, func(k, v []byte) error { + err := tx.UnsafeForEach(schema.Key, func(k, v []byte) error { h.WriteKeyValue(k, v) return nil }) @@ -42,12 +43,12 @@ type kvHasher struct { hash hash.Hash32 compactRevision int64 revision int64 - keep map[revision]struct{} + keep map[Revision]struct{} } -func newKVHasher(compactRev, rev int64, keep map[revision]struct{}) kvHasher { +func newKVHasher(compactRev, rev int64, keep map[Revision]struct{}) kvHasher { h := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - h.Write(buckets.Key.Name()) + h.Write(schema.Key.Name()) return kvHasher{ hash: h, compactRevision: compactRev, @@ -57,15 +58,15 @@ func newKVHasher(compactRev, rev int64, keep map[revision]struct{}) kvHasher { } func (h *kvHasher) WriteKeyValue(k, v []byte) { - kr := bytesToRev(k) - upper := revision{main: h.revision + 1} + kr := BytesToRev(k) + upper := Revision{Main: h.revision + 1} if !upper.GreaterThan(kr) { return } - isTombstoneRev := isTombstone(k) + isTombstone := BytesToBucketKey(k).tombstone - lower := revision{main: h.compactRevision + 1} + lower := Revision{Main: h.compactRevision + 1} // skip revisions that are scheduled for deletion // due to compacting; don't skip if there isn't one. if lower.GreaterThan(kr) && len(h.keep) > 0 { @@ -80,7 +81,7 @@ func (h *kvHasher) WriteKeyValue(k, v []byte) { // delete it. So we should skip the tombstone in such cases when // computing the hash to ensure that both older and newer versions // can always generate the same hash values. - if kr.main == h.compactRevision && isTombstoneRev { + if kr.Main == h.compactRevision && isTombstone { return } @@ -99,13 +100,18 @@ type KeyValueHash struct { } type HashStorage interface { - // Hash computes the hash of the KV's backend. + // Hash computes the hash of the whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! 
+ // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashByRev" method instead for "key" bucket consistency checks. Hash() (hash uint32, revision int64, err error) // HashByRev computes the hash of all MVCC revisions up to a given revision. HashByRev(rev int64) (hash KeyValueHash, currentRev int64, err error) - // Store adds hash value in local cache, allowing it can be returned by HashByRev. + // Store adds hash value in local cache, allowing it to be returned by HashByRev. Store(valueHash KeyValueHash) // Hashes returns list of up to `hashStorageMaxSize` newest previously stored hashes. @@ -119,7 +125,7 @@ type hashStorage struct { lg *zap.Logger } -func newHashStorage(lg *zap.Logger, s *store) *hashStorage { +func NewHashStorage(lg *zap.Logger, s *store) HashStorage { return &hashStorage{ store: s, lg: lg, diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/index.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/index.go similarity index 54% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/index.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/index.go index 0a5cb005160f3..f300831b2936f 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/index.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/index.go @@ -15,7 +15,6 @@ package mvcc import ( - "sort" "sync" "github.com/google/btree" @@ -23,15 +22,14 @@ import ( ) type index interface { - Get(key []byte, atRev int64) (rev, created revision, ver int64, err error) - Range(key, end []byte, atRev int64) ([][]byte, []revision) - Revisions(key, end []byte, atRev int64, limit int) ([]revision, int) + Get(key []byte, atRev int64) (rev, created Revision, ver int64, err error) + Range(key, end []byte, atRev int64) ([][]byte, []Revision) + Revisions(key, end []byte, atRev int64, limit int) ([]Revision, int) CountRevisions(key, end []byte, atRev int64) int - Put(key []byte, rev revision) - Tombstone(key []byte, rev revision) error - RangeSince(key, end []byte, rev int64) []revision - Compact(rev int64) map[revision]struct{} - Keep(rev int64) map[revision]struct{} + Put(key []byte, rev Revision) + Tombstone(key []byte, rev Revision) error + Compact(rev int64) map[Revision]struct{} + Keep(rev int64) map[Revision]struct{} Equal(b index) bool Insert(ki *keyIndex) @@ -40,38 +38,43 @@ type index interface { type treeIndex struct { sync.RWMutex - tree *btree.BTree + tree *btree.BTreeG[*keyIndex] lg *zap.Logger } func newTreeIndex(lg *zap.Logger) index { return &treeIndex{ - tree: btree.New(32), - lg: lg, + tree: btree.NewG(32, func(aki *keyIndex, bki *keyIndex) bool { + return aki.Less(bki) + }), + lg: lg, } } -func (ti *treeIndex) Put(key []byte, rev revision) { +func (ti *treeIndex) Put(key []byte, rev Revision) { keyi := &keyIndex{key: key} ti.Lock() defer ti.Unlock() - item := ti.tree.Get(keyi) - if item == nil { - keyi.put(ti.lg, rev.main, rev.sub) + okeyi, ok := ti.tree.Get(keyi) + if !ok { + keyi.put(ti.lg, rev.Main, rev.Sub) ti.tree.ReplaceOrInsert(keyi) return } - okeyi := item.(*keyIndex) - okeyi.put(ti.lg, rev.main, rev.sub) + okeyi.put(ti.lg, rev.Main, rev.Sub) } -func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { - keyi := &keyIndex{key: key} +func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created Revision, ver int64, err error) { ti.RLock() defer ti.RUnlock() + return ti.unsafeGet(key, atRev) +} + +func (ti *treeIndex) unsafeGet(key []byte, atRev int64) (modified, created Revision, ver 
int64, err error) { + keyi := &keyIndex{key: key} if keyi = ti.keyIndex(keyi); keyi == nil { - return revision{}, revision{}, 0, ErrRevisionNotFound + return Revision{}, Revision{}, 0, ErrRevisionNotFound } return keyi.get(ti.lg, atRev) } @@ -83,38 +86,41 @@ func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { } func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { - if item := ti.tree.Get(keyi); item != nil { - return item.(*keyIndex) + if ki, ok := ti.tree.Get(keyi); ok { + return ki } return nil } -func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex) bool) { +func (ti *treeIndex) unsafeVisit(key, end []byte, f func(ki *keyIndex) bool) { keyi, endi := &keyIndex{key: key}, &keyIndex{key: end} - ti.RLock() - defer ti.RUnlock() - - ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { + ti.tree.AscendGreaterOrEqual(keyi, func(item *keyIndex) bool { if len(endi.key) > 0 && !item.Less(endi) { return false } - if !f(item.(*keyIndex)) { + if !f(item) { return false } return true }) } -func (ti *treeIndex) Revisions(key, end []byte, atRev int64, limit int) (revs []revision, total int) { +// Revisions returns limited number of revisions from key(included) to end(excluded) +// at the given rev. The returned slice is sorted in the order of key. There is no limit if limit <= 0. +// The second return parameter isn't capped by the limit and reflects the total number of revisions. +func (ti *treeIndex) Revisions(key, end []byte, atRev int64, limit int) (revs []Revision, total int) { + ti.RLock() + defer ti.RUnlock() + if end == nil { - rev, _, _, err := ti.Get(key, atRev) + rev, _, _, err := ti.unsafeGet(key, atRev) if err != nil { return nil, 0 } - return []revision{rev}, 1 + return []Revision{rev}, 1 } - ti.visit(key, end, func(ki *keyIndex) bool { + ti.unsafeVisit(key, end, func(ki *keyIndex) bool { if rev, _, _, err := ki.get(ti.lg, atRev); err == nil { if limit <= 0 || len(revs) < limit { revs = append(revs, rev) @@ -126,16 +132,21 @@ func (ti *treeIndex) Revisions(key, end []byte, atRev int64, limit int) (revs [] return revs, total } +// CountRevisions returns the number of revisions +// from key(included) to end(excluded) at the given rev. 
func (ti *treeIndex) CountRevisions(key, end []byte, atRev int64) int { + ti.RLock() + defer ti.RUnlock() + if end == nil { - _, _, _, err := ti.Get(key, atRev) + _, _, _, err := ti.unsafeGet(key, atRev) if err != nil { return 0 } return 1 } total := 0 - ti.visit(key, end, func(ki *keyIndex) bool { + ti.unsafeVisit(key, end, func(ki *keyIndex) bool { if _, _, _, err := ki.get(ti.lg, atRev); err == nil { total++ } @@ -144,15 +155,18 @@ func (ti *treeIndex) CountRevisions(key, end []byte, atRev int64) int { return total } -func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) { +func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []Revision) { + ti.RLock() + defer ti.RUnlock() + if end == nil { - rev, _, _, err := ti.Get(key, atRev) + rev, _, _, err := ti.unsafeGet(key, atRev) if err != nil { return nil, nil } - return [][]byte{key}, []revision{rev} + return [][]byte{key}, []Revision{rev} } - ti.visit(key, end, func(ki *keyIndex) bool { + ti.unsafeVisit(key, end, func(ki *keyIndex) bool { if rev, _, _, err := ki.get(ti.lg, atRev); err == nil { revs = append(revs, rev) keys = append(keys, ki.key) @@ -162,69 +176,34 @@ func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs [] return keys, revs } -func (ti *treeIndex) Tombstone(key []byte, rev revision) error { +func (ti *treeIndex) Tombstone(key []byte, rev Revision) error { keyi := &keyIndex{key: key} ti.Lock() defer ti.Unlock() - item := ti.tree.Get(keyi) - if item == nil { + ki, ok := ti.tree.Get(keyi) + if !ok { return ErrRevisionNotFound } - ki := item.(*keyIndex) - return ki.tombstone(ti.lg, rev.main, rev.sub) -} - -// RangeSince returns all revisions from key(including) to end(excluding) -// at or after the given rev. The returned slice is sorted in the order -// of revision. -func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { - keyi := &keyIndex{key: key} - - ti.RLock() - defer ti.RUnlock() - - if end == nil { - item := ti.tree.Get(keyi) - if item == nil { - return nil - } - keyi = item.(*keyIndex) - return keyi.since(ti.lg, rev) - } - - endi := &keyIndex{key: end} - var revs []revision - ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { - if len(endi.key) > 0 && !item.Less(endi) { - return false - } - curKeyi := item.(*keyIndex) - revs = append(revs, curKeyi.since(ti.lg, rev)...) 
- return true - }) - sort.Sort(revisions(revs)) - - return revs + return ki.tombstone(ti.lg, rev.Main, rev.Sub) } -func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { - available := make(map[revision]struct{}) +func (ti *treeIndex) Compact(rev int64) map[Revision]struct{} { + available := make(map[Revision]struct{}) ti.lg.Info("compact tree index", zap.Int64("revision", rev)) ti.Lock() clone := ti.tree.Clone() ti.Unlock() - clone.Ascend(func(item btree.Item) bool { - keyi := item.(*keyIndex) - //Lock is needed here to prevent modification to the keyIndex while - //compaction is going on or revision added to empty before deletion + clone.Ascend(func(keyi *keyIndex) bool { + // Lock is needed here to prevent modification to the keyIndex while + // compaction is going on or revision added to empty before deletion ti.Lock() keyi.compact(ti.lg, rev, available) if keyi.isEmpty() { - item := ti.tree.Delete(keyi) - if item == nil { + _, ok := ti.tree.Delete(keyi) + if !ok { ti.lg.Panic("failed to delete during compaction") } } @@ -235,12 +214,11 @@ func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { } // Keep finds all revisions to be kept for a Compaction at the given rev. -func (ti *treeIndex) Keep(rev int64) map[revision]struct{} { - available := make(map[revision]struct{}) +func (ti *treeIndex) Keep(rev int64) map[Revision]struct{} { + available := make(map[Revision]struct{}) ti.RLock() defer ti.RUnlock() - ti.tree.Ascend(func(i btree.Item) bool { - keyi := i.(*keyIndex) + ti.tree.Ascend(func(keyi *keyIndex) bool { keyi.keep(rev, available) return true }) @@ -256,9 +234,8 @@ func (ti *treeIndex) Equal(bi index) bool { equal := true - ti.tree.Ascend(func(item btree.Item) bool { - aki := item.(*keyIndex) - bki := b.tree.Get(item).(*keyIndex) + ti.tree.Ascend(func(aki *keyIndex) bool { + bki, _ := b.tree.Get(aki) if !aki.equal(bki) { equal = false return false diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/key_index.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/key_index.go similarity index 84% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/key_index.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/key_index.go index d38b0933d9247..6ba7d2732ee5c 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/key_index.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/key_index.go @@ -19,7 +19,6 @@ import ( "errors" "fmt" - "github.com/google/btree" "go.uber.org/zap" ) @@ -30,14 +29,14 @@ var ( // keyIndex stores the revisions of a key in the backend. // Each keyIndex has at least one key generation. // Each generation might have several key versions. -// Tombstone on a key appends an tombstone version at the end +// Tombstone on a key appends a tombstone version at the end // of the current generation and creates a new empty generation. // Each version of a key has an index pointing to the backend. // // For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo" // generate a keyIndex: // key: "foo" -// rev: 5 +// modified: 5 // generations: // // {empty} @@ -75,21 +74,21 @@ var ( // {empty} -> key SHOULD be removed. type keyIndex struct { key []byte - modified revision // the main rev of the last modification + modified Revision // the main rev of the last modification generations []generation } // put puts a revision to the keyIndex. 
func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) { - rev := revision{main: main, sub: sub} + rev := Revision{Main: main, Sub: sub} if !rev.GreaterThan(ki.modified) { lg.Panic( "'put' with an unexpected smaller revision", - zap.Int64("given-revision-main", rev.main), - zap.Int64("given-revision-sub", rev.sub), - zap.Int64("modified-revision-main", ki.modified.main), - zap.Int64("modified-revision-sub", ki.modified.sub), + zap.Int64("given-revision-main", rev.Main), + zap.Int64("given-revision-sub", rev.Sub), + zap.Int64("modified-revision-main", ki.modified.Main), + zap.Int64("modified-revision-sub", ki.modified.Sub), ) } if len(ki.generations) == 0 { @@ -105,7 +104,7 @@ func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) { ki.modified = rev } -func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int64) { +func (ki *keyIndex) restore(lg *zap.Logger, created, modified Revision, ver int64) { if len(ki.generations) != 0 { lg.Panic( "'restore' got an unexpected non-empty generations", @@ -114,7 +113,7 @@ func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int6 } ki.modified = modified - g := generation{created: created, ver: ver, revs: []revision{modified}} + g := generation{created: created, ver: ver, revs: []Revision{modified}} ki.generations = append(ki.generations, g) keysGauge.Inc() } @@ -139,8 +138,8 @@ func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error { } // get gets the modified, created revision and version of the key that satisfies the given atRev. -// Rev must be higher than or equal to the given atRev. -func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision, ver int64, err error) { +// Rev must be smaller than or equal to the given atRev. +func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created Revision, ver int64, err error) { if ki.isEmpty() { lg.Panic( "'get' got an unexpected empty keyIndex", @@ -149,28 +148,28 @@ func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision } g := ki.findGeneration(atRev) if g.isEmpty() { - return revision{}, revision{}, 0, ErrRevisionNotFound + return Revision{}, Revision{}, 0, ErrRevisionNotFound } - n := g.walk(func(rev revision) bool { return rev.main > atRev }) + n := g.walk(func(rev Revision) bool { return rev.Main > atRev }) if n != -1 { return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil } - return revision{}, revision{}, 0, ErrRevisionNotFound + return Revision{}, Revision{}, 0, ErrRevisionNotFound } // since returns revisions since the given rev. Only the revision with the // largest sub revision will be returned if multiple revisions have the same // main revision. 
-func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision { +func (ki *keyIndex) since(lg *zap.Logger, rev int64) []Revision { if ki.isEmpty() { lg.Panic( "'since' got an unexpected empty keyIndex", zap.String("key", string(ki.key)), ) } - since := revision{rev, 0} + since := Revision{Main: rev} var gi int // find the generations to start checking for gi = len(ki.generations) - 1; gi > 0; gi-- { @@ -183,21 +182,21 @@ func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision { } } - var revs []revision + var revs []Revision var last int64 for ; gi < len(ki.generations); gi++ { for _, r := range ki.generations[gi].revs { if since.GreaterThan(r) { continue } - if r.main == last { + if r.Main == last { // replace the revision with a new one that has higher sub value, // because the original one should not be seen by external revs[len(revs)-1] = r continue } revs = append(revs, r) - last = r.main + last = r.Main } } return revs @@ -206,7 +205,7 @@ func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision { // compact compacts a keyIndex by removing the versions with smaller or equal // revision than the given atRev except the largest one. // If a generation becomes empty during compaction, it will be removed. -func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]struct{}) { +func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[Revision]struct{}) { if ki.isEmpty() { lg.Panic( "'compact' got an unexpected empty keyIndex", @@ -229,7 +228,7 @@ func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision] } // keep finds the revision to be kept if compact is called at given atRev. -func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) { +func (ki *keyIndex) keep(atRev int64, available map[Revision]struct{}) { if ki.isEmpty() { return } @@ -249,11 +248,11 @@ func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) { } } -func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (genIdx int, revIndex int) { +func (ki *keyIndex) doCompact(atRev int64, available map[Revision]struct{}) (genIdx int, revIndex int) { // walk until reaching the first revision smaller or equal to "atRev", // and add the revision to the available map - f := func(rev revision) bool { - if rev.main <= atRev { + f := func(rev Revision) bool { + if rev.Main <= atRev { available[rev] = struct{}{} return false } @@ -263,7 +262,7 @@ func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (gen genIdx, g := 0, &ki.generations[0] // find first generation includes atRev or created after atRev for genIdx < len(ki.generations)-1 { - if tomb := g.revs[len(g.revs)-1].main; tomb >= atRev { + if tomb := g.revs[len(g.revs)-1].Main; tomb >= atRev { break } genIdx++ @@ -293,11 +292,11 @@ func (ki *keyIndex) findGeneration(rev int64) *generation { } g := ki.generations[cg] if cg != lastg { - if tomb := g.revs[len(g.revs)-1].main; tomb <= rev { + if tomb := g.revs[len(g.revs)-1].Main; tomb <= rev { return nil } } - if g.revs[0].main <= rev { + if g.revs[0].Main <= rev { return &ki.generations[cg] } cg-- @@ -305,8 +304,8 @@ func (ki *keyIndex) findGeneration(rev int64) *generation { return nil } -func (ki *keyIndex) Less(b btree.Item) bool { - return bytes.Compare(ki.key, b.(*keyIndex).key) == -1 +func (ki *keyIndex) Less(bki *keyIndex) bool { + return bytes.Compare(ki.key, bki.key) == -1 } func (ki *keyIndex) equal(b *keyIndex) bool { @@ -339,8 +338,8 @@ func (ki *keyIndex) String() string { // 
generation contains multiple revisions of a key. type generation struct { ver int64 - created revision // when the generation is created (put in first revision). - revs []revision + created Revision // when the generation is created (put in first revision). + revs []Revision } func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 } @@ -350,7 +349,7 @@ func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 } // walk returns until: 1. it finishes walking all pairs 2. the function returns false. // walk returns the position at where it stopped. If it stopped after // finishing walking, -1 will be returned. -func (g *generation) walk(f func(rev revision) bool) int { +func (g *generation) walk(f func(rev Revision) bool) int { l := len(g.revs) for i := range g.revs { ok := f(g.revs[l-i-1]) diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/kv.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv.go similarity index 99% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/kv.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv.go index 109b0d7ccb20b..6250bb91198bb 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/kv.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv.go @@ -20,7 +20,7 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc/backend" + "go.etcd.io/etcd/server/v3/storage/backend" ) type RangeOptions struct { diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/kv_view.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv_view.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/kv_view.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv_view.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore.go similarity index 82% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore.go index fb81af6ec747f..9ee73cce2e94e 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore.go @@ -22,33 +22,22 @@ import ( "sync" "time" + "go.uber.org/zap" + "go.etcd.io/etcd/api/v3/mvccpb" + "go.etcd.io/etcd/client/pkg/v3/verify" "go.etcd.io/etcd/pkg/v3/schedule" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" - - "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" ) var ( - scheduledCompactKeyName = []byte("scheduledCompactRev") - finishedCompactKeyName = []byte("finishedCompactRev") - ErrCompacted = errors.New("mvcc: required revision has been compacted") ErrFutureRev = errors.New("mvcc: required revision is a future revision") ) -const ( - // markedRevBytesLen is the byte length of marked revision. - // The first `revBytesLen` bytes represents a normal revision. The last - // one byte is the mark. 
- markedRevBytesLen = revBytesLen + 1 - markBytePosition = markedRevBytesLen - 1 - markTombstone byte = 't' -) - var restoreChunkKeys = 10000 // non-const for testing var defaultCompactionBatchLimit = 1000 var defaultCompactionSleepInterval = 10 * time.Millisecond @@ -111,13 +100,13 @@ func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfi currentRev: 1, compactMainRev: -1, - fifoSched: schedule.NewFIFOScheduler(), + fifoSched: schedule.NewFIFOScheduler(lg), stopc: make(chan struct{}), lg: lg, } - s.hashes = newHashStorage(lg, s) + s.hashes = NewHashStorage(lg, s) s.ReadView = &readView{s} s.WriteView = &writeView{s} if s.le != nil { @@ -126,8 +115,8 @@ func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfi tx := s.b.BatchTx() tx.LockOutsideApply() - tx.UnsafeCreateBucket(buckets.Key) - tx.UnsafeCreateBucket(buckets.Meta) + tx.UnsafeCreateBucket(schema.Key) + schema.UnsafeCreateMetaBucket(tx) tx.Unlock() s.b.ForceCommit() @@ -146,12 +135,12 @@ func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { select { case <-s.stopc: default: - // fix deadlock in mvcc,for more information, please refer to pr 11817. + // fix deadlock in mvcc, for more information, please refer to pr 11817. // s.stopc is only updated in restore operation, which is called by apply // snapshot call, compaction and apply snapshot requests are serialized by // raft, and do not happen at the same time. s.mu.Lock() - f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } + f := schedule.NewJob("kvstore_compactBarrier", func(ctx context.Context) { s.compactBarrier(ctx, ch) }) s.fifoSched.Schedule(f) s.mu.Unlock() } @@ -165,7 +154,7 @@ func (s *store) hash() (hash uint32, revision int64, err error) { start := time.Now() s.b.ForceCommit() - h, err := s.b.Hash(buckets.DefaultIgnores) + h, err := s.b.Hash(schema.DefaultIgnores) hashSec.Observe(time.Since(start).Seconds()) return h, s.currentRev, err @@ -205,7 +194,7 @@ func (s *store) updateCompactRev(rev int64) (<-chan struct{}, int64, error) { s.revMu.Lock() if rev <= s.compactMainRev { ch := make(chan struct{}) - f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } + f := schedule.NewJob("kvstore_updateCompactRev_compactBarrier", func(ctx context.Context) { s.compactBarrier(ctx, ch) }) s.fifoSched.Schedule(f) s.revMu.Unlock() return ch, 0, ErrCompacted @@ -217,15 +206,11 @@ func (s *store) updateCompactRev(rev int64) (<-chan struct{}, int64, error) { compactMainRev := s.compactMainRev s.compactMainRev = rev - rbytes := newRevBytes() - revToBytes(revision{main: rev}, rbytes) - - tx := s.b.BatchTx() - tx.LockInsideApply() - tx.UnsafePut(buckets.Meta, scheduledCompactKeyName, rbytes) - tx.Unlock() + SetScheduledCompact(s.b.BatchTx(), rev) // ensure that desired compaction is persisted + // gofail: var compactBeforeCommitScheduledCompact struct{} s.b.ForceCommit() + // gofail: var compactAfterCommitScheduledCompact struct{} s.revMu.Unlock() @@ -235,8 +220,8 @@ func (s *store) updateCompactRev(rev int64) (<-chan struct{}, int64, error) { // checkPrevCompactionCompleted checks whether the previous scheduled compaction is completed. 
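// For illustration (reading of the check below, not part of the upstream change):
// "completed" means scheduledCompactRev equals finishedCompactRev and the two
// keys are either both present or both absent; a crash after persisting the
// scheduled revision but before finishing the compaction leaves them diverging,
// which is exactly the case the hash-storing logic in compact() wants to detect.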
func (s *store) checkPrevCompactionCompleted() bool { tx := s.b.ReadTx() - tx.Lock() - defer tx.Unlock() + tx.RLock() + defer tx.RUnlock() scheduledCompact, scheduledCompactFound := UnsafeReadScheduledCompact(tx) finishedCompact, finishedCompactFound := UnsafeReadFinishedCompact(tx) return scheduledCompact == finishedCompact && scheduledCompactFound == finishedCompactFound @@ -244,7 +229,7 @@ func (s *store) checkPrevCompactionCompleted() bool { func (s *store) compact(trace *traceutil.Trace, rev, prevCompactRev int64, prevCompactionCompleted bool) <-chan struct{} { ch := make(chan struct{}) - var j = func(ctx context.Context) { + j := schedule.NewJob("kvstore_compact", func(ctx context.Context) { if ctx.Err() != nil { s.compactBarrier(ctx, ch) return @@ -263,7 +248,7 @@ func (s *store) compact(trace *traceutil.Trace, rev, prevCompactRev int64, prevC s.lg.Info("previous compaction was interrupted, skip storing compaction hash value") } close(ch) - } + }) s.fifoSched.Schedule(j) trace.Step("schedule compaction") @@ -282,7 +267,6 @@ func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) { func (s *store) Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) { s.mu.Lock() - prevCompactionCompleted := s.checkPrevCompactionCompleted() ch, prevCompactRev, err := s.updateCompactRev(rev) trace.Step("check and update compact revision") @@ -319,24 +303,25 @@ func (s *store) Restore(b backend.Backend) error { s.revMu.Unlock() } - s.fifoSched = schedule.NewFIFOScheduler() + s.fifoSched = schedule.NewFIFOScheduler(s.lg) s.stopc = make(chan struct{}) return s.restore() } +//nolint:unparam func (s *store) restore() error { s.setupMetricsReporter() - min, max := newRevBytes(), newRevBytes() - revToBytes(revision{main: 1}, min) - revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) + min, max := NewRevBytes(), NewRevBytes() + min = RevToBytes(Revision{Main: 1}, min) + max = RevToBytes(Revision{Main: math.MaxInt64, Sub: math.MaxInt64}, max) keyToLease := make(map[string]lease.LeaseID) // restore index tx := s.b.ReadTx() - tx.Lock() + tx.RLock() finishedCompact, found := UnsafeReadFinishedCompact(tx) if found { @@ -345,19 +330,17 @@ func (s *store) restore() error { s.lg.Info( "restored last compact revision", - zap.Stringer("meta-bucket-name", buckets.Meta), - zap.String("meta-bucket-name-key", string(finishedCompactKeyName)), + zap.String("meta-bucket-name-key", string(schema.FinishedCompactKeyName)), zap.Int64("restored-compact-revision", s.compactMainRev), ) s.revMu.Unlock() } scheduledCompact, _ := UnsafeReadScheduledCompact(tx) - // index keys concurrently as they're loaded in from tx keysGauge.Set(0) rkvc, revc := restoreIntoIndex(s.lg, s.kvindex) for { - keys, vals := tx.UnsafeRange(buckets.Key, min, max, int64(restoreChunkKeys)) + keys, vals := tx.UnsafeRange(schema.Key, min, max, int64(restoreChunkKeys)) if len(keys) == 0 { break } @@ -369,9 +352,9 @@ func (s *store) restore() error { break } // next set begins after where this one ended - newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) - newMin.sub++ - revToBytes(newMin, min) + newMin := BytesToRev(keys[len(keys)-1][:revBytesLen]) + newMin.Sub++ + min = RevToBytes(newMin, min) } close(rkvc) @@ -405,7 +388,7 @@ func (s *store) restore() error { for key, lid := range keyToLease { if s.le == nil { - tx.Unlock() + tx.RUnlock() panic("no lessor to attach lease") } err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}}) @@ -417,8 +400,7 @@ func (s *store) restore() error { ) } } - - tx.Unlock() + 
tx.RUnlock() s.lg.Info("kvstore restored", zap.Int64("current-rev", s.currentRev)) @@ -431,8 +413,6 @@ func (s *store) restore() error { } else { s.lg.Info( "resume scheduled compaction", - zap.Stringer("meta-bucket-name", buckets.Meta), - zap.String("meta-bucket-name-key", string(scheduledCompactKeyName)), zap.Int64("scheduled-compact-revision", scheduledCompact), ) } @@ -474,18 +454,25 @@ func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int ok = true } } - rev := bytesToRev(rkv.key) - currentRev = rev.main + + rev := BytesToRev(rkv.key) + verify.Verify(func() { + if rev.Main < currentRev { + panic(fmt.Errorf("revision %d shouldn't be less than the previous revision %d", rev.Main, currentRev)) + } + }) + currentRev = rev.Main + if ok { if isTombstone(rkv.key) { - if err := ki.tombstone(lg, rev.main, rev.sub); err != nil { + if err := ki.tombstone(lg, rev.Main, rev.Sub); err != nil { lg.Warn("tombstone encountered error", zap.Error(err)) } continue } - ki.put(lg, rev.main, rev.sub) + ki.put(lg, rev.Main, rev.Sub) } else if !isTombstone(rkv.key) { - ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) + ki.restore(lg, Revision{Main: rkv.kv.CreateRevision}, rev, rkv.kv.Version) idx.Insert(ki) kiCache[rkv.kstr] = ki } @@ -523,9 +510,6 @@ func (s *store) setupMetricsReporter() { reportDbTotalSizeInBytesMu.Lock() reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } reportDbTotalSizeInBytesMu.Unlock() - reportDbTotalSizeInBytesDebugMu.Lock() - reportDbTotalSizeInBytesDebug = func() float64 { return float64(b.Size()) } - reportDbTotalSizeInBytesDebugMu.Unlock() reportDbTotalSizeInUseInBytesMu.Lock() reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) } reportDbTotalSizeInUseInBytesMu.Unlock() @@ -548,23 +532,6 @@ func (s *store) setupMetricsReporter() { reportCompactRevMu.Unlock() } -// appendMarkTombstone appends tombstone mark to normal revision bytes. -func appendMarkTombstone(lg *zap.Logger, b []byte) []byte { - if len(b) != revBytesLen { - lg.Panic( - "cannot append tombstone mark to non-normal revision bytes", - zap.Int("expected-revision-bytes-size", revBytesLen), - zap.Int("given-revision-bytes-size", len(b)), - ) - } - return append(b, markTombstone) -} - -// isTombstone checks whether the revision bytes is a tombstone. 
-func isTombstone(b []byte) bool { - return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone -} - func (s *store) HashStorage() HashStorage { return s.hashes } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_compaction.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_compaction.go similarity index 85% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_compaction.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_compaction.go index ad8c5cb5bcfec..45409114e3928 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_compaction.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_compaction.go @@ -20,8 +20,9 @@ import ( "time" humanize "github.com/dustin/go-humanize" - "go.etcd.io/etcd/server/v3/mvcc/buckets" "go.uber.org/zap" + + "go.etcd.io/etcd/server/v3/storage/schema" ) func (s *store) scheduleCompaction(compactMainRev, prevCompactRev int64) (KeyValueHash, error) { @@ -43,19 +44,18 @@ func (s *store) scheduleCompaction(compactMainRev, prevCompactRev int64) (KeyVal defer batchTicker.Stop() h := newKVHasher(prevCompactRev, compactMainRev, keep) last := make([]byte, 8+1+8) - for { - var rev revision + var rev Revision start := time.Now() tx := s.b.BatchTx() tx.LockOutsideApply() - keys, values := tx.UnsafeRange(buckets.Key, last, end, int64(batchNum)) + keys, values := tx.UnsafeRange(schema.Key, last, end, int64(batchNum)) for i := range keys { - rev = bytesToRev(keys[i]) + rev = BytesToRev(keys[i]) if _, ok := keep[rev]; !ok { - tx.UnsafeDelete(buckets.Key, keys[i]) + tx.UnsafeDelete(schema.Key, keys[i]) keyCompactions++ } h.WriteKeyValue(keys[i], values[i]) @@ -63,10 +63,9 @@ func (s *store) scheduleCompaction(compactMainRev, prevCompactRev int64) (KeyVal if len(keys) < batchNum { // gofail: var compactBeforeSetFinishedCompact struct{} - rbytes := make([]byte, 8+1+8) - revToBytes(revision{main: compactMainRev}, rbytes) - tx.UnsafePut(buckets.Meta, finishedCompactKeyName, rbytes) + UnsafeSetFinishedCompact(tx, compactMainRev) tx.Unlock() + // gofail: var compactAfterSetFinishedCompact struct{} hash := h.Hash() size, sizeInUse := s.b.Size(), s.b.SizeInUse() s.lg.Info( @@ -82,11 +81,13 @@ func (s *store) scheduleCompaction(compactMainRev, prevCompactRev int64) (KeyVal return hash, nil } - // update last - revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last) tx.Unlock() + // update last + last = RevToBytes(Revision{Main: rev.Main, Sub: rev.Sub + 1}, last) // Immediately commit the compaction deletes instead of letting them accumulate in the write buffer + // gofail: var compactBeforeCommitBatch struct{} s.b.ForceCommit() + // gofail: var compactAfterCommitBatch struct{} dbCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond)) select { diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_txn.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_txn.go similarity index 80% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_txn.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_txn.go index 855ededddb586..c44a2cb3d915b 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_txn.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_txn.go @@ -18,17 +18,23 @@ import ( "context" "fmt" + "go.uber.org/zap" + "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" - "go.uber.org/zap" + 
"go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" ) type storeTxnRead struct { - s *store + storeTxnCommon tx backend.ReadTx +} + +type storeTxnCommon struct { + s *store + tx backend.UnsafeReader firstRev int64 rev int64 @@ -53,23 +59,85 @@ func (s *store) Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead { tx.RLock() // RLock is no-op. concurrentReadTx does not need to be locked after it is created. firstRev, rev := s.compactMainRev, s.currentRev s.revMu.RUnlock() - return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev, trace}) + return newMetricsTxnRead(&storeTxnRead{storeTxnCommon{s, tx, firstRev, rev, trace}, tx}) } -func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } -func (tr *storeTxnRead) Rev() int64 { return tr.rev } +func (tr *storeTxnCommon) FirstRev() int64 { return tr.firstRev } +func (tr *storeTxnCommon) Rev() int64 { return tr.rev } -func (tr *storeTxnRead) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { +func (tr *storeTxnCommon) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { return tr.rangeKeys(ctx, key, end, tr.Rev(), ro) } +func (tr *storeTxnCommon) rangeKeys(ctx context.Context, key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { + rev := ro.Rev + if rev > curRev { + return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev + } + if rev <= 0 { + rev = curRev + } + if rev < tr.s.compactMainRev { + return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted + } + if ro.Count { + total := tr.s.kvindex.CountRevisions(key, end, rev) + tr.trace.Step("count revisions from in-memory index tree") + return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil + } + revpairs, total := tr.s.kvindex.Revisions(key, end, rev, int(ro.Limit)) + tr.trace.Step("range keys from in-memory index tree") + if len(revpairs) == 0 { + return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil + } + + limit := int(ro.Limit) + if limit <= 0 || limit > len(revpairs) { + limit = len(revpairs) + } + + kvs := make([]mvccpb.KeyValue, limit) + revBytes := NewRevBytes() + for i, revpair := range revpairs[:len(kvs)] { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("rangeKeys: context cancelled: %w", ctx.Err()) + default: + } + revBytes = RevToBytes(revpair, revBytes) + _, vs := tr.tx.UnsafeRange(schema.Key, revBytes, nil, 0) + if len(vs) != 1 { + tr.s.lg.Fatal( + "range failed to find revision pair", + zap.Int64("revision-main", revpair.Main), + zap.Int64("revision-sub", revpair.Sub), + zap.Int64("revision-current", curRev), + zap.Int64("range-option-rev", ro.Rev), + zap.Int64("range-option-limit", ro.Limit), + zap.Binary("key", key), + zap.Binary("end", end), + zap.Int("len-revpairs", len(revpairs)), + zap.Int("len-values", len(vs)), + ) + } + if err := kvs[i].Unmarshal(vs[0]); err != nil { + tr.s.lg.Fatal( + "failed to unmarshal mvccpb.KeyValue", + zap.Error(err), + ) + } + } + tr.trace.Step("range keys from bolt db") + return &RangeResult{KVs: kvs, Count: total, Rev: curRev}, nil +} + func (tr *storeTxnRead) End() { tr.tx.RUnlock() // RUnlock signals the end of concurrentReadTx. tr.s.mu.RUnlock() } type storeTxnWrite struct { - storeTxnRead + storeTxnCommon tx backend.BatchTx // beginRev is the revision where the txn begins; it will write to the next revision. 
beginRev int64 @@ -81,10 +149,10 @@ func (s *store) Write(trace *traceutil.Trace) TxnWrite { tx := s.b.BatchTx() tx.LockInsideApply() tw := &storeTxnWrite{ - storeTxnRead: storeTxnRead{s, tx, 0, 0, trace}, - tx: tx, - beginRev: s.currentRev, - changes: make([]mvccpb.KeyValue, 0, 4), + storeTxnCommon: storeTxnCommon{s, tx, 0, 0, trace}, + tx: tx, + beginRev: s.currentRev, + changes: make([]mvccpb.KeyValue, 0, 4), } return newMetricsTxnWrite(tw) } @@ -125,68 +193,6 @@ func (tw *storeTxnWrite) End() { tw.s.mu.RUnlock() } -func (tr *storeTxnRead) rangeKeys(ctx context.Context, key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { - rev := ro.Rev - if rev > curRev { - return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev - } - if rev <= 0 { - rev = curRev - } - if rev < tr.s.compactMainRev { - return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted - } - if ro.Count { - total := tr.s.kvindex.CountRevisions(key, end, rev) - tr.trace.Step("count revisions from in-memory index tree") - return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil - } - revpairs, total := tr.s.kvindex.Revisions(key, end, rev, int(ro.Limit)) - tr.trace.Step("range keys from in-memory index tree") - if len(revpairs) == 0 { - return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil - } - - limit := int(ro.Limit) - if limit <= 0 || limit > len(revpairs) { - limit = len(revpairs) - } - - kvs := make([]mvccpb.KeyValue, limit) - revBytes := newRevBytes() - for i, revpair := range revpairs[:len(kvs)] { - select { - case <-ctx.Done(): - return nil, fmt.Errorf("rangeKeys: context cancelled: %w", ctx.Err()) - default: - } - revToBytes(revpair, revBytes) - _, vs := tr.tx.UnsafeRange(buckets.Key, revBytes, nil, 0) - if len(vs) != 1 { - tr.s.lg.Fatal( - "range failed to find revision pair", - zap.Int64("revision-main", revpair.main), - zap.Int64("revision-sub", revpair.sub), - zap.Int64("revision-current", curRev), - zap.Int64("range-option-rev", ro.Rev), - zap.Int64("range-option-limit", ro.Limit), - zap.Binary("key", key), - zap.Binary("end", end), - zap.Int("len-revpairs", len(revpairs)), - zap.Int("len-values", len(vs)), - ) - } - if err := kvs[i].Unmarshal(vs[0]); err != nil { - tr.s.lg.Fatal( - "failed to unmarshal mvccpb.KeyValue", - zap.Error(err), - ) - } - } - tr.trace.Step("range keys from bolt db") - return &RangeResult{KVs: kvs, Count: total, Rev: curRev}, nil -} - func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { rev := tw.beginRev + 1 c := rev @@ -196,13 +202,13 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { // get its previous leaseID _, created, ver, err := tw.s.kvindex.Get(key, rev) if err == nil { - c = created.main + c = created.Main oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) tw.trace.Step("get key's previous created_revision and leaseID") } - ibytes := newRevBytes() - idxRev := revision{main: rev, sub: int64(len(tw.changes))} - revToBytes(idxRev, ibytes) + ibytes := NewRevBytes() + idxRev := Revision{Main: rev, Sub: int64(len(tw.changes))} + ibytes = RevToBytes(idxRev, ibytes) ver = ver + 1 kv := mvccpb.KeyValue{ @@ -216,25 +222,30 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { d, err := kv.Marshal() if err != nil { - tw.storeTxnRead.s.lg.Fatal( + tw.storeTxnCommon.s.lg.Fatal( "failed to marshal mvccpb.KeyValue", zap.Error(err), ) } tw.trace.Step("marshal mvccpb.KeyValue") - tw.tx.UnsafeSeqPut(buckets.Key, ibytes, d) + tw.tx.UnsafeSeqPut(schema.Key, ibytes, d) 
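// Note (assumption about the backend, not stated in this diff): revision keys
// are monotonically increasing, so UnsafeSeqPut can take an append-optimized
// bucket write path that a plain UnsafePut would not.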
tw.s.kvindex.Put(key, idxRev) tw.changes = append(tw.changes, kv) tw.trace.Step("store kv pair into bolt db") + if oldLease == leaseID { + tw.trace.Step("attach lease to kv pair") + return + } + if oldLease != lease.NoLease { if tw.s.le == nil { panic("no lessor to detach lease") } err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) if err != nil { - tw.storeTxnRead.s.lg.Error( + tw.storeTxnCommon.s.lg.Error( "failed to detach old lease from a key", zap.Error(err), ) @@ -268,26 +279,24 @@ func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { } func (tw *storeTxnWrite) delete(key []byte) { - ibytes := newRevBytes() - idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} - revToBytes(idxRev, ibytes) - - ibytes = appendMarkTombstone(tw.storeTxnRead.s.lg, ibytes) + ibytes := NewRevBytes() + idxRev := newBucketKey(tw.beginRev+1, int64(len(tw.changes)), true) + ibytes = BucketKeyToBytes(idxRev, ibytes) kv := mvccpb.KeyValue{Key: key} d, err := kv.Marshal() if err != nil { - tw.storeTxnRead.s.lg.Fatal( + tw.storeTxnCommon.s.lg.Fatal( "failed to marshal mvccpb.KeyValue", zap.Error(err), ) } - tw.tx.UnsafeSeqPut(buckets.Key, ibytes, d) - err = tw.s.kvindex.Tombstone(key, idxRev) + tw.tx.UnsafeSeqPut(schema.Key, ibytes, d) + err = tw.s.kvindex.Tombstone(key, idxRev.Revision) if err != nil { - tw.storeTxnRead.s.lg.Fatal( + tw.storeTxnCommon.s.lg.Fatal( "failed to tombstone an existing key", zap.String("key", string(key)), zap.Error(err), @@ -301,7 +310,7 @@ func (tw *storeTxnWrite) delete(key []byte) { if leaseID != lease.NoLease { err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) if err != nil { - tw.storeTxnRead.s.lg.Error( + tw.storeTxnCommon.s.lg.Error( "failed to detach old lease from a key", zap.Error(err), ) diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics.go similarity index 95% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/metrics.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics.go index f28d114e2bc99..b75abbcc08930 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/metrics.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics.go @@ -28,13 +28,6 @@ var ( Name: "range_total", Help: "Total number of ranges seen by this member.", }) - rangeCounterDebug = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "range_total", - Help: "Total number of ranges seen by this member.", - }) putCounter = prometheus.NewCounter( prometheus.CounterOpts{ @@ -176,10 +169,6 @@ var ( reportDbTotalSizeInBytesMu sync.RWMutex reportDbTotalSizeInBytes = func() float64 { return 0 } - // overridden by mvcc initialization - reportDbTotalSizeInBytesDebugMu sync.RWMutex - reportDbTotalSizeInBytesDebug = func() float64 { return 0 } - dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: "etcd", Subsystem: "mvcc", @@ -280,7 +269,6 @@ var ( func init() { prometheus.MustRegister(rangeCounter) - prometheus.MustRegister(rangeCounterDebug) prometheus.MustRegister(putCounter) prometheus.MustRegister(deleteCounter) prometheus.MustRegister(txnCounter) diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/metrics_txn.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics_txn.go similarity index 96% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/metrics_txn.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics_txn.go index af844f8468b1b..aef877a1c159d 100644 --- 
a/vendor/go.etcd.io/etcd/server/v3/mvcc/metrics_txn.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics_txn.go @@ -61,7 +61,6 @@ func (tw *metricsTxnWrite) End() { ranges := float64(tw.ranges) rangeCounter.Add(ranges) - rangeCounterDebug.Add(ranges) // TODO: remove in 3.5 release puts := float64(tw.puts) putCounter.Add(puts) diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/revision.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/revision.go new file mode 100644 index 0000000000000..db44da29343ec --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/revision.go @@ -0,0 +1,122 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "encoding/binary" + "fmt" +) + +const ( + // revBytesLen is the byte length of a normal revision. + // First 8 bytes is the revision.main in big-endian format. The 9th byte + // is a '_'. The last 8 bytes is the revision.sub in big-endian format. + revBytesLen = 8 + 1 + 8 + // markedRevBytesLen is the byte length of marked revision. + // The first `revBytesLen` bytes represents a normal revision. The last + // one byte is the mark. + markedRevBytesLen = revBytesLen + 1 + markBytePosition = markedRevBytesLen - 1 + markTombstone byte = 't' +) + +type Revision struct { + // Main is the main revision of a set of changes that happen atomically. + Main int64 + // Sub is the sub revision of a change in a set of changes that happen + // atomically. Each change has different increasing sub revision in that + // set. + Sub int64 +} + +func (a Revision) GreaterThan(b Revision) bool { + if a.Main > b.Main { + return true + } + if a.Main < b.Main { + return false + } + return a.Sub > b.Sub +} + +func RevToBytes(rev Revision, bytes []byte) []byte { + return BucketKeyToBytes(newBucketKey(rev.Main, rev.Sub, false), bytes) +} + +func BytesToRev(bytes []byte) Revision { + return BytesToBucketKey(bytes).Revision +} + +// BucketKey indicates modification of the key-value space. +// The set of changes that share same main revision changes the key-value space atomically. 
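// For illustration (derived from BucketKeyToBytes below): the encoding is
// 8-byte big-endian Main, a '_' separator, then 8-byte big-endian Sub
// (revBytesLen = 17), with an optional trailing 't' tombstone mark
// (markedRevBytesLen = 18). For example, a tombstone at {Main: 3, Sub: 1}
// encodes to 00 00 00 00 00 00 00 03 5f 00 00 00 00 00 00 00 01 74.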
+type BucketKey struct { + Revision + tombstone bool +} + +func newBucketKey(main, sub int64, isTombstone bool) BucketKey { + return BucketKey{ + Revision: Revision{ + Main: main, + Sub: sub, + }, + tombstone: isTombstone, + } +} + +func NewRevBytes() []byte { + return make([]byte, revBytesLen, markedRevBytesLen) +} + +func BucketKeyToBytes(rev BucketKey, bytes []byte) []byte { + binary.BigEndian.PutUint64(bytes, uint64(rev.Main)) + bytes[8] = '_' + binary.BigEndian.PutUint64(bytes[9:], uint64(rev.Sub)) + if rev.tombstone { + switch len(bytes) { + case revBytesLen: + bytes = append(bytes, markTombstone) + case markedRevBytesLen: + bytes[markBytePosition] = markTombstone + } + } + return bytes +} + +func BytesToBucketKey(bytes []byte) BucketKey { + if (len(bytes) != revBytesLen) && (len(bytes) != markedRevBytesLen) { + panic(fmt.Sprintf("invalid revision length: %d", len(bytes))) + } + if bytes[8] != '_' { + panic(fmt.Sprintf("invalid separator in bucket key: %q", bytes[8])) + } + main := int64(binary.BigEndian.Uint64(bytes[0:8])) + sub := int64(binary.BigEndian.Uint64(bytes[9:])) + if main < 0 || sub < 0 { + panic(fmt.Sprintf("negative revision: main=%d sub=%d", main, sub)) + } + return BucketKey{ + Revision: Revision{ + Main: main, + Sub: sub, + }, + tombstone: isTombstone(bytes), + } +} + +// isTombstone checks whether the revision bytes is a tombstone. +func isTombstone(b []byte) bool { + return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/store.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/store.go new file mode 100644 index 0000000000000..523b8cd8f2cab --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/store.go @@ -0,0 +1,60 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import ( + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" +) + +func UnsafeReadFinishedCompact(tx backend.UnsafeReader) (int64, bool) { + _, finishedCompactBytes := tx.UnsafeRange(schema.Meta, schema.FinishedCompactKeyName, nil, 0) + if len(finishedCompactBytes) != 0 { + return BytesToRev(finishedCompactBytes[0]).Main, true + } + return 0, false +} + +func UnsafeReadScheduledCompact(tx backend.UnsafeReader) (int64, bool) { + _, scheduledCompactBytes := tx.UnsafeRange(schema.Meta, schema.ScheduledCompactKeyName, nil, 0) + if len(scheduledCompactBytes) != 0 { + return BytesToRev(scheduledCompactBytes[0]).Main, true + } + return 0, false +} + +func SetScheduledCompact(tx backend.BatchTx, value int64) { + tx.LockInsideApply() + defer tx.Unlock() + UnsafeSetScheduledCompact(tx, value) +} + +func UnsafeSetScheduledCompact(tx backend.UnsafeWriter, value int64) { + rbytes := NewRevBytes() + rbytes = RevToBytes(Revision{Main: value}, rbytes) + tx.UnsafePut(schema.Meta, schema.ScheduledCompactKeyName, rbytes) +} + +func SetFinishedCompact(tx backend.BatchTx, value int64) { + tx.LockInsideApply() + defer tx.Unlock() + UnsafeSetFinishedCompact(tx, value) +} + +func UnsafeSetFinishedCompact(tx backend.UnsafeWriter, value int64) { + rbytes := NewRevBytes() + rbytes = RevToBytes(Revision{Main: value}, rbytes) + tx.UnsafePut(schema.Meta, schema.FinishedCompactKeyName, rbytes) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/watchable_store.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store.go similarity index 95% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/watchable_store.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store.go index cdac20ad5edcd..ad17b2be7acec 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/watchable_store.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store.go @@ -18,14 +18,14 @@ import ( "sync" "time" + "go.uber.org/zap" + "go.etcd.io/etcd/api/v3/mvccpb" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" - - "go.uber.org/zap" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" ) // non-const so modifiable by tests @@ -39,6 +39,8 @@ var ( maxWatchersPerSync = 512 ) +func ChanBufLen() int { return chanBufLen } + type watchable interface { watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) progress(w *watcher) @@ -213,6 +215,10 @@ func (s *watchableStore) Restore(b backend.Backend) error { func (s *watchableStore) syncWatchersLoop() { defer s.wg.Done() + waitDuration := 100 * time.Millisecond + delayTicker := time.NewTicker(waitDuration) + defer delayTicker.Stop() + for { s.mu.RLock() st := time.Now() @@ -225,15 +231,15 @@ func (s *watchableStore) syncWatchersLoop() { } syncDuration := time.Since(st) - waitDuration := 100 * time.Millisecond + delayTicker.Reset(waitDuration) // more work pending? 
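// For illustration (reading of the condition below): "pending" means some
// watchers are still unsynced and this pass made progress
// (lastUnsyncedWatchers > unsyncedWatchers); in that case the loop waits for
// roughly as long as the sync itself took before the next pass.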
if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers { // be fair to other store operations by yielding time taken - waitDuration = syncDuration + delayTicker.Reset(syncDuration) } select { - case <-time.After(waitDuration): + case <-delayTicker.C: case <-s.stopc: return } @@ -348,22 +354,22 @@ func (s *watchableStore) syncWatchers() int { compactionRev := s.store.compactMainRev wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) - minBytes, maxBytes := newRevBytes(), newRevBytes() - revToBytes(revision{main: minRev}, minBytes) - revToBytes(revision{main: curRev + 1}, maxBytes) + minBytes, maxBytes := NewRevBytes(), NewRevBytes() + minBytes = RevToBytes(Revision{Main: minRev}, minBytes) + maxBytes = RevToBytes(Revision{Main: curRev + 1}, maxBytes) // UnsafeRange returns keys and values. And in boltdb, keys are revisions. // values are actual key-value pairs in backend. tx := s.store.b.ReadTx() tx.RLock() - revs, vs := tx.UnsafeRange(buckets.Key, minBytes, maxBytes, 0) + revs, vs := tx.UnsafeRange(schema.Key, minBytes, maxBytes, 0) evs := kvsToEvents(s.store.lg, wg, revs, vs) // Must unlock after kvsToEvents, because vs (come from boltdb memory) is not deep copy. // We can only unlock after Unmarshal, which will do deep copy. // Otherwise we will trigger SIGSEGV during boltdb re-mmap. tx.RUnlock() - var victims watcherBatch + victims := make(watcherBatch) wb := newWatcherBatch(wg, evs) for w := range wg.watchers { if w.minRev < compactionRev { @@ -388,9 +394,6 @@ func (s *watchableStore) syncWatchers() int { if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) { pendingEventsGauge.Add(float64(len(eb.evs))) } else { - if victims == nil { - victims = make(watcherBatch) - } w.victim = true } @@ -432,7 +435,7 @@ func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []m if isTombstone(revs[i]) { ty = mvccpb.DELETE // patch in mod revision so watchers won't skip - kv.ModRevision = bytesToRev(revs[i]).main + kv.ModRevision = BytesToRev(revs[i]).Main } evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty}) } @@ -442,7 +445,7 @@ func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []m // notify notifies the fact that given event at the given rev just happened to // watchers that watch on the key of the event. 
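// For illustration (invariant behind the panic below): all events handed to
// notify come from a single write txn and therefore share one main revision,
// so a watcher batch spanning more than one revision (eb.revs != 1) indicates
// a bug rather than a recoverable error.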
func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { - var victim watcherBatch + victim := make(watcherBatch) for w, eb := range newWatcherBatch(&s.synced, evs) { if eb.revs != 1 { s.store.lg.Panic( @@ -454,9 +457,6 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { pendingEventsGauge.Add(float64(len(eb.evs))) } else { // move slow watcher to victims - if victim == nil { - victim = make(watcherBatch) - } w.victim = true victim[w] = eb s.synced.delete(w) @@ -471,7 +471,7 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { } func (s *watchableStore) addVictim(victim watcherBatch) { - if victim == nil { + if len(victim) == 0 { return } s.victims = append(s.victims, victim) diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/watchable_store_txn.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store_txn.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/watchable_store_txn.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store_txn.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/watcher.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/watcher.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/watcher_group.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher_group.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/watcher_group.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher_group.go diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/quota.go b/vendor/go.etcd.io/etcd/server/v3/storage/quota.go similarity index 72% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/quota.go rename to vendor/go.etcd.io/etcd/server/v3/storage/quota.go index c5fadc4b1e43d..f9ff72d7e826b 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/quota.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/quota.go @@ -12,15 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -package etcdserver +package storage import ( "sync" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - humanize "github.com/dustin/go-humanize" "go.uber.org/zap" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/storage/backend" ) const ( @@ -37,28 +38,28 @@ const ( // too few resources available within the quota to apply the request. type Quota interface { // Available judges whether the given request fits within the quota. - Available(req interface{}) bool + Available(req any) bool // Cost computes the charge against the quota for a given request. - Cost(req interface{}) int + Cost(req any) int // Remaining is the amount of charge left for the quota. 
Remaining() int64 } type passthroughQuota struct{} -func (*passthroughQuota) Available(interface{}) bool { return true } -func (*passthroughQuota) Cost(interface{}) int { return 0 } -func (*passthroughQuota) Remaining() int64 { return 1 } +func (*passthroughQuota) Available(any) bool { return true } +func (*passthroughQuota) Cost(any) int { return 0 } +func (*passthroughQuota) Remaining() int64 { return 1 } -type backendQuota struct { - s *EtcdServer +type BackendQuota struct { + be backend.Backend maxBackendBytes int64 } const ( // leaseOverhead is an estimate for the cost of storing a lease leaseOverhead = 64 - // kvOverhead is an estimate for the cost of storing a key's metadata + // kvOverhead is an estimate for the cost of storing a key's Metadata kvOverhead = 256 ) @@ -71,23 +72,21 @@ var ( ) // NewBackendQuota creates a quota layer with the given storage limit. -func NewBackendQuota(s *EtcdServer, name string) Quota { - lg := s.Logger() - quotaBackendBytes.Set(float64(s.Cfg.QuotaBackendBytes)) - - if s.Cfg.QuotaBackendBytes < 0 { +func NewBackendQuota(lg *zap.Logger, quotaBackendBytesCfg int64, be backend.Backend, name string) Quota { + quotaBackendBytes.Set(float64(quotaBackendBytesCfg)) + if quotaBackendBytesCfg < 0 { // disable quotas if negative quotaLogOnce.Do(func() { lg.Info( "disabled backend quota", zap.String("quota-name", name), - zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.Int64("quota-size-bytes", quotaBackendBytesCfg), ) }) return &passthroughQuota{} } - if s.Cfg.QuotaBackendBytes == 0 { + if quotaBackendBytesCfg == 0 { // use default size if no quota size given quotaLogOnce.Do(func() { if lg != nil { @@ -100,16 +99,16 @@ func NewBackendQuota(s *EtcdServer, name string) Quota { } }) quotaBackendBytes.Set(float64(DefaultQuotaBytes)) - return &backendQuota{s, DefaultQuotaBytes} + return &BackendQuota{be, DefaultQuotaBytes} } quotaLogOnce.Do(func() { - if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { + if quotaBackendBytesCfg > MaxQuotaBytes { lg.Warn( "quota exceeds the maximum value", zap.String("quota-name", name), - zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), - zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + zap.Int64("quota-size-bytes", quotaBackendBytesCfg), + zap.String("quota-size", humanize.Bytes(uint64(quotaBackendBytesCfg))), zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes), zap.String("quota-maximum-size", maxQuotaSize), ) @@ -117,24 +116,24 @@ func NewBackendQuota(s *EtcdServer, name string) Quota { lg.Info( "enabled backend quota", zap.String("quota-name", name), - zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), - zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + zap.Int64("quota-size-bytes", quotaBackendBytesCfg), + zap.String("quota-size", humanize.Bytes(uint64(quotaBackendBytesCfg))), ) }) - return &backendQuota{s, s.Cfg.QuotaBackendBytes} + return &BackendQuota{be, quotaBackendBytesCfg} } -func (b *backendQuota) Available(v interface{}) bool { +func (b *BackendQuota) Available(v any) bool { cost := b.Cost(v) // if there are no mutating requests, it's safe to pass through if cost == 0 { return true } - // TODO: maybe optimize backend.Size() - return b.s.Backend().Size()+int64(cost) < b.maxBackendBytes + // TODO: maybe optimize Backend.Size() + return b.be.Size()+int64(cost) < b.maxBackendBytes } -func (b *backendQuota) Cost(v interface{}) int { +func (b *BackendQuota) Cost(v any) int { switch r := v.(type) { case *pb.PutRequest: return costPut(r) @@ -172,6 +171,6 
@@ func costTxn(r *pb.TxnRequest) int { return sizeSuccess } -func (b *backendQuota) Remaining() int64 { - return b.maxBackendBytes - b.s.Backend().Size() +func (b *BackendQuota) Remaining() int64 { + return b.maxBackendBytes - b.be.Size() } diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/actions.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/actions.go new file mode 100644 index 0000000000000..415ffcd792f60 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/actions.go @@ -0,0 +1,94 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "go.uber.org/zap" + + "go.etcd.io/etcd/server/v3/storage/backend" +) + +type action interface { + // unsafeDo executes the action and returns a revert action that, when + // executed, should restore the state from before. + unsafeDo(tx backend.UnsafeReadWriter) (revert action, err error) +} + +type setKeyAction struct { + Bucket backend.Bucket + FieldName []byte + FieldValue []byte +} + +func (a setKeyAction) unsafeDo(tx backend.UnsafeReadWriter) (action, error) { + revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName) + tx.UnsafePut(a.Bucket, a.FieldName, a.FieldValue) + return revert, nil +} + +type deleteKeyAction struct { + Bucket backend.Bucket + FieldName []byte +} + +func (a deleteKeyAction) unsafeDo(tx backend.UnsafeReadWriter) (action, error) { + revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName) + tx.UnsafeDelete(a.Bucket, a.FieldName) + return revert, nil +} + +func restoreFieldValueAction(tx backend.UnsafeReader, bucket backend.Bucket, fieldName []byte) action { + _, vs := tx.UnsafeRange(bucket, fieldName, nil, 1) + if len(vs) == 1 { + return &setKeyAction{ + Bucket: bucket, + FieldName: fieldName, + FieldValue: vs[0], + } + } + return &deleteKeyAction{ + Bucket: bucket, + FieldName: fieldName, + } +} + +type ActionList []action + +// unsafeExecute executes actions one by one. If one of the actions returns an error, +// it will revert them. +func (as ActionList) unsafeExecute(lg *zap.Logger, tx backend.UnsafeReadWriter) error { + var revertActions = make(ActionList, 0, len(as)) + for _, a := range as { + revert, err := a.unsafeDo(tx) + + if err != nil { + revertActions.unsafeExecuteInReversedOrder(lg, tx) + return err + } + revertActions = append(revertActions, revert) + } + return nil +} + +// unsafeExecuteInReversedOrder executes actions in reversed order. Will panic on
+func (as ActionList) unsafeExecuteInReversedOrder(lg *zap.Logger, tx backend.UnsafeReadWriter) { + for j := len(as) - 1; j >= 0; j-- { + _, err := as[j].unsafeDo(tx) + if err != nil { + lg.Panic("Cannot recover from revert error", zap.Error(err)) + } + } +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/alarm.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/alarm.go new file mode 100644 index 0000000000000..6e81d0f467124 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/alarm.go @@ -0,0 +1,97 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "go.uber.org/zap" + + "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/storage/backend" +) + +type alarmBackend struct { + lg *zap.Logger + be backend.Backend +} + +func NewAlarmBackend(lg *zap.Logger, be backend.Backend) *alarmBackend { + return &alarmBackend{ + lg: lg, + be: be, + } +} + +func (s *alarmBackend) CreateAlarmBucket() { + tx := s.be.BatchTx() + tx.LockOutsideApply() + defer tx.Unlock() + tx.UnsafeCreateBucket(Alarm) +} + +func (s *alarmBackend) MustPutAlarm(alarm *etcdserverpb.AlarmMember) { + tx := s.be.BatchTx() + tx.LockInsideApply() + defer tx.Unlock() + s.mustUnsafePutAlarm(tx, alarm) +} + +func (s *alarmBackend) mustUnsafePutAlarm(tx backend.UnsafeWriter, alarm *etcdserverpb.AlarmMember) { + v, err := alarm.Marshal() + if err != nil { + s.lg.Panic("failed to marshal alarm member", zap.Error(err)) + } + + tx.UnsafePut(Alarm, v, nil) +} + +func (s *alarmBackend) MustDeleteAlarm(alarm *etcdserverpb.AlarmMember) { + tx := s.be.BatchTx() + tx.LockInsideApply() + defer tx.Unlock() + s.mustUnsafeDeleteAlarm(tx, alarm) +} + +func (s *alarmBackend) mustUnsafeDeleteAlarm(tx backend.UnsafeWriter, alarm *etcdserverpb.AlarmMember) { + v, err := alarm.Marshal() + if err != nil { + s.lg.Panic("failed to marshal alarm member", zap.Error(err)) + } + + tx.UnsafeDelete(Alarm, v) +} + +func (s *alarmBackend) GetAllAlarms() ([]*etcdserverpb.AlarmMember, error) { + tx := s.be.ReadTx() + tx.RLock() + defer tx.RUnlock() + return s.unsafeGetAllAlarms(tx) +} + +func (s *alarmBackend) unsafeGetAllAlarms(tx backend.UnsafeReader) ([]*etcdserverpb.AlarmMember, error) { + var ms []*etcdserverpb.AlarmMember + err := tx.UnsafeForEach(Alarm, func(k, v []byte) error { + var m etcdserverpb.AlarmMember + if err := m.Unmarshal(k); err != nil { + return err + } + ms = append(ms, &m) + return nil + }) + return ms, err +} + +func (s alarmBackend) ForceCommit() { + s.be.ForceCommit() +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth.go new file mode 100644 index 0000000000000..3bff8412c9f46 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth.go @@ -0,0 +1,150 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "bytes" + "encoding/binary" + + "go.uber.org/zap" + + "go.etcd.io/etcd/server/v3/auth" + "go.etcd.io/etcd/server/v3/storage/backend" +) + +const ( + revBytesLen = 8 +) + +var ( + authEnabled = []byte{1} + authDisabled = []byte{0} +) + +type authBackend struct { + be backend.Backend + lg *zap.Logger +} + +var _ auth.AuthBackend = (*authBackend)(nil) + +func NewAuthBackend(lg *zap.Logger, be backend.Backend) *authBackend { + return &authBackend{ + be: be, + lg: lg, + } +} + +func (abe *authBackend) CreateAuthBuckets() { + tx := abe.be.BatchTx() + tx.LockOutsideApply() + defer tx.Unlock() + tx.UnsafeCreateBucket(Auth) + tx.UnsafeCreateBucket(AuthUsers) + tx.UnsafeCreateBucket(AuthRoles) +} + +func (abe *authBackend) ForceCommit() { + abe.be.ForceCommit() +} + +func (abe *authBackend) ReadTx() auth.AuthReadTx { + return &authReadTx{tx: abe.be.ReadTx(), lg: abe.lg} +} + +func (abe *authBackend) BatchTx() auth.AuthBatchTx { + return &authBatchTx{tx: abe.be.BatchTx(), lg: abe.lg} +} + +type authReadTx struct { + tx backend.ReadTx + lg *zap.Logger +} + +type authBatchTx struct { + tx backend.BatchTx + lg *zap.Logger +} + +var _ auth.AuthReadTx = (*authReadTx)(nil) +var _ auth.AuthBatchTx = (*authBatchTx)(nil) + +func (atx *authBatchTx) UnsafeSaveAuthEnabled(enabled bool) { + if enabled { + atx.tx.UnsafePut(Auth, AuthEnabledKeyName, authEnabled) + } else { + atx.tx.UnsafePut(Auth, AuthEnabledKeyName, authDisabled) + } +} + +func (atx *authBatchTx) UnsafeSaveAuthRevision(rev uint64) { + revBytes := make([]byte, revBytesLen) + binary.BigEndian.PutUint64(revBytes, rev) + atx.tx.UnsafePut(Auth, AuthRevisionKeyName, revBytes) +} + +func (atx *authBatchTx) UnsafeReadAuthEnabled() bool { + return unsafeReadAuthEnabled(atx.tx) +} + +func (atx *authBatchTx) UnsafeReadAuthRevision() uint64 { + return unsafeReadAuthRevision(atx.tx) +} + +func (atx *authBatchTx) Lock() { + atx.tx.LockInsideApply() +} + +func (atx *authBatchTx) Unlock() { + atx.tx.Unlock() + // Calling Commit() for defensive purpose. If the number of pending writes doesn't exceed batchLimit, + // ReadTx can miss some writes issued by its predecessor BatchTx. 
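// Note (assumption about backend batching, not stated in this diff): pending
// writes are only folded into the read buffer on commit, so without this
// Commit() a lone auth write could remain invisible to a subsequent ReadTx
// until the batch limit or interval forces a flush.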
+ atx.tx.Commit() +} + +func (atx *authReadTx) UnsafeReadAuthEnabled() bool { + return unsafeReadAuthEnabled(atx.tx) +} + +func unsafeReadAuthEnabled(tx backend.UnsafeReader) bool { + _, vs := tx.UnsafeRange(Auth, AuthEnabledKeyName, nil, 0) + if len(vs) == 1 { + if bytes.Equal(vs[0], authEnabled) { + return true + } + } + return false +} + +func (atx *authReadTx) UnsafeReadAuthRevision() uint64 { + return unsafeReadAuthRevision(atx.tx) +} + +func unsafeReadAuthRevision(tx backend.UnsafeReader) uint64 { + _, vs := tx.UnsafeRange(Auth, AuthRevisionKeyName, nil, 0) + if len(vs) != 1 { + // this can happen in the initialization phase + return 0 + } + return binary.BigEndian.Uint64(vs[0]) +} + +func (atx *authReadTx) RLock() { + atx.tx.RLock() +} + +func (atx *authReadTx) RUnlock() { + atx.tx.RUnlock() +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_roles.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_roles.go new file mode 100644 index 0000000000000..6161a0885a910 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_roles.go @@ -0,0 +1,105 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "go.uber.org/zap" + + "go.etcd.io/etcd/api/v3/authpb" + "go.etcd.io/etcd/server/v3/storage/backend" +) + +func UnsafeCreateAuthRolesBucket(tx backend.UnsafeWriter) { + tx.UnsafeCreateBucket(AuthRoles) +} + +func (abe *authBackend) GetRole(roleName string) *authpb.Role { + tx := abe.ReadTx() + tx.RLock() + defer tx.RUnlock() + return tx.UnsafeGetRole(roleName) +} + +func (atx *authBatchTx) UnsafeGetRole(roleName string) *authpb.Role { + return unsafeGetRole(atx.lg, atx.tx, roleName) +} + +func (abe *authBackend) GetAllRoles() []*authpb.Role { + tx := abe.BatchTx() + tx.Lock() + defer tx.Unlock() + return tx.UnsafeGetAllRoles() +} + +func (atx *authBatchTx) UnsafeGetAllRoles() []*authpb.Role { + return unsafeGetAllRoles(atx.lg, atx.tx) +} + +func (atx *authBatchTx) UnsafePutRole(role *authpb.Role) { + b, err := role.Marshal() + if err != nil { + atx.lg.Panic( + "failed to marshal 'authpb.Role'", + zap.String("role-name", string(role.Name)), + zap.Error(err), + ) + } + + atx.tx.UnsafePut(AuthRoles, role.Name, b) +} + +func (atx *authBatchTx) UnsafeDeleteRole(rolename string) { + atx.tx.UnsafeDelete(AuthRoles, []byte(rolename)) +} + +func (atx *authReadTx) UnsafeGetRole(roleName string) *authpb.Role { + return unsafeGetRole(atx.lg, atx.tx, roleName) +} + +func unsafeGetRole(lg *zap.Logger, tx backend.UnsafeReader, roleName string) *authpb.Role { + _, vs := tx.UnsafeRange(AuthRoles, []byte(roleName), nil, 0) + if len(vs) == 0 { + return nil + } + + role := &authpb.Role{} + err := role.Unmarshal(vs[0]) + if err != nil { + lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) + } + return role +} + +func (atx *authReadTx) UnsafeGetAllRoles() []*authpb.Role { + return unsafeGetAllRoles(atx.lg, atx.tx) +} + +func unsafeGetAllRoles(lg *zap.Logger, tx backend.UnsafeReader) 
[]*authpb.Role { + _, vs := tx.UnsafeRange(AuthRoles, []byte{0}, []byte{0xff}, -1) + if len(vs) == 0 { + return nil + } + + roles := make([]*authpb.Role, len(vs)) + for i := range vs { + role := &authpb.Role{} + err := role.Unmarshal(vs[i]) + if err != nil { + lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) + } + roles[i] = role + } + return roles +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_users.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_users.go new file mode 100644 index 0000000000000..c21fa7c16d8a3 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_users.go @@ -0,0 +1,108 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "go.uber.org/zap" + + "go.etcd.io/etcd/api/v3/authpb" + "go.etcd.io/etcd/server/v3/storage/backend" +) + +func (abe *authBackend) GetUser(username string) *authpb.User { + tx := abe.ReadTx() + tx.RLock() + defer tx.RUnlock() + return tx.UnsafeGetUser(username) +} + +func (atx *authBatchTx) UnsafeGetUser(username string) *authpb.User { + return unsafeGetUser(atx.lg, atx.tx, username) +} + +func (atx *authBatchTx) UnsafeGetAllUsers() []*authpb.User { + return unsafeGetAllUsers(atx.lg, atx.tx) +} + +func (atx *authBatchTx) UnsafePutUser(user *authpb.User) { + b, err := user.Marshal() + if err != nil { + atx.lg.Panic("failed to marshal 'authpb.User'", zap.Error(err)) + } + atx.tx.UnsafePut(AuthUsers, user.Name, b) +} + +func (atx *authBatchTx) UnsafeDeleteUser(username string) { + atx.tx.UnsafeDelete(AuthUsers, []byte(username)) +} + +func (atx *authReadTx) UnsafeGetUser(username string) *authpb.User { + return unsafeGetUser(atx.lg, atx.tx, username) +} + +func unsafeGetUser(lg *zap.Logger, tx backend.UnsafeReader, username string) *authpb.User { + _, vs := tx.UnsafeRange(AuthUsers, []byte(username), nil, 0) + if len(vs) == 0 { + return nil + } + + user := &authpb.User{} + err := user.Unmarshal(vs[0]) + if err != nil { + lg.Panic( + "failed to unmarshal 'authpb.User'", + zap.String("user-name", username), + zap.Error(err), + ) + } + return user +} + +func (abe *authBackend) GetAllUsers() []*authpb.User { + tx := abe.ReadTx() + tx.RLock() + defer tx.RUnlock() + return tx.UnsafeGetAllUsers() +} + +func (atx *authReadTx) UnsafeGetAllUsers() []*authpb.User { + return unsafeGetAllUsers(atx.lg, atx.tx) +} + +func unsafeGetAllUsers(lg *zap.Logger, tx backend.UnsafeReader) []*authpb.User { + var vs [][]byte + err := tx.UnsafeForEach(AuthUsers, func(k []byte, v []byte) error { + vs = append(vs, v) + return nil + }) + if err != nil { + lg.Panic("failed to get users", + zap.Error(err)) + } + if len(vs) == 0 { + return nil + } + + users := make([]*authpb.User, len(vs)) + for i := range vs { + user := &authpb.User{} + err := user.Unmarshal(vs[i]) + if err != nil { + lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) + } + users[i] = user + } + return users +} diff --git
a/vendor/go.etcd.io/etcd/server/v3/mvcc/buckets/bucket.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/bucket.go
similarity index 70%
rename from vendor/go.etcd.io/etcd/server/v3/mvcc/buckets/bucket.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/schema/bucket.go
index 9214f72f25102..06da660df5ee2 100644
--- a/vendor/go.etcd.io/etcd/server/v3/mvcc/buckets/bucket.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/bucket.go
@@ -12,12 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package buckets
+package schema
import (
"bytes"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
+	"go.etcd.io/etcd/client/pkg/v3/types"
+	"go.etcd.io/etcd/server/v3/storage/backend"
)
var (
@@ -53,6 +54,8 @@ var (
AuthRoles = backend.Bucket(bucket{id: 22, name: authRolesBucketName, safeRangeBucket: false})
Test = backend.Bucket(bucket{id: 100, name: testBucketName, safeRangeBucket: false})
+
+	AllBuckets = []backend.Bucket{Key, Meta, Lease, Alarm, Cluster, Members, MembersRemoved, Auth, AuthUsers, AuthRoles}
)
type bucket struct {
@@ -67,14 +70,31 @@ func (b bucket) String() string { return string(b.Name()) }
func (b bucket) IsSafeRangeBucket() bool { return b.safeRangeBucket }
var (
+	// Pre v3.5
+	ScheduledCompactKeyName = []byte("scheduledCompactRev")
+	FinishedCompactKeyName = []byte("finishedCompactRev")
MetaConsistentIndexKeyName = []byte("consistent_index")
-	MetaTermKeyName = []byte("term")
+	AuthEnabledKeyName = []byte("authEnabled")
+	AuthRevisionKeyName = []byte("authRevision")
+	// Since v3.5
+	MetaTermKeyName = []byte("term")
+	MetaConfStateName = []byte("confState")
+	ClusterClusterVersionKeyName = []byte("clusterVersion")
+	ClusterDowngradeKeyName = []byte("downgrade")
+	// Since v3.6
+	MetaStorageVersionName = []byte("storageVersion")
+	// Before adding a new meta key please update server/etcdserver/version
)
// DefaultIgnores defines buckets & keys to ignore in hash checking.
func DefaultIgnores(bucket, key []byte) bool {
// consistent index & term might be changed due to v2 internal sync, which
// is not controllable by the user.
-	return bytes.Compare(bucket, Meta.Name()) == 0 &&
-		(bytes.Compare(key, MetaTermKeyName) == 0 || bytes.Compare(key, MetaConsistentIndexKeyName) == 0)
+	// storage version might change after a wal snapshot and is not controlled by the user.
+	return bytes.Equal(bucket, Meta.Name()) &&
+		(bytes.Equal(key, MetaTermKeyName) || bytes.Equal(key, MetaConsistentIndexKeyName) || bytes.Equal(key, MetaStorageVersionName))
+}
+
+func BackendMemberKey(id types.ID) []byte {
+	return []byte(id.String())
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/changes.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/changes.go
new file mode 100644
index 0000000000000..6eb0b75120935
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/changes.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package schema + +import "go.etcd.io/etcd/server/v3/storage/backend" + +type schemaChange interface { + upgradeAction() action + downgradeAction() action +} + +// addNewField represents adding new field when upgrading. Downgrade will remove the field. +func addNewField(bucket backend.Bucket, fieldName []byte, fieldValue []byte) schemaChange { + return simpleSchemaChange{ + upgrade: setKeyAction{ + Bucket: bucket, + FieldName: fieldName, + FieldValue: fieldValue, + }, + downgrade: deleteKeyAction{ + Bucket: bucket, + FieldName: fieldName, + }, + } +} + +type simpleSchemaChange struct { + upgrade action + downgrade action +} + +func (c simpleSchemaChange) upgradeAction() action { + return c.upgrade +} + +func (c simpleSchemaChange) downgradeAction() action { + return c.downgrade +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/cindex.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/cindex.go new file mode 100644 index 0000000000000..cdf938d346c0c --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/cindex.go @@ -0,0 +1,95 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "encoding/binary" + "fmt" + + "go.etcd.io/etcd/client/pkg/v3/verify" + "go.etcd.io/etcd/server/v3/storage/backend" +) + +// UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exist yet). +func UnsafeCreateMetaBucket(tx backend.UnsafeWriter) { + tx.UnsafeCreateBucket(Meta) +} + +// CreateMetaBucket creates the `meta` bucket (if it does not exist yet). +func CreateMetaBucket(tx backend.BatchTx) { + tx.LockOutsideApply() + defer tx.Unlock() + tx.UnsafeCreateBucket(Meta) +} + +// UnsafeReadConsistentIndex loads consistent index & term from given transaction. +// returns 0,0 if the data are not found. +// Term is persisted since v3.5. +func UnsafeReadConsistentIndex(tx backend.UnsafeReader) (uint64, uint64) { + _, vs := tx.UnsafeRange(Meta, MetaConsistentIndexKeyName, nil, 0) + if len(vs) == 0 { + return 0, 0 + } + v := binary.BigEndian.Uint64(vs[0]) + _, ts := tx.UnsafeRange(Meta, MetaTermKeyName, nil, 0) + if len(ts) == 0 { + return v, 0 + } + t := binary.BigEndian.Uint64(ts[0]) + return v, t +} + +// ReadConsistentIndex loads consistent index and term from given transaction. +// returns 0 if the data are not found. +func ReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) { + tx.RLock() + defer tx.RUnlock() + return UnsafeReadConsistentIndex(tx) +} + +func UnsafeUpdateConsistentIndexForce(tx backend.UnsafeReadWriter, index uint64, term uint64) { + unsafeUpdateConsistentIndex(tx, index, term, true) +} + +func UnsafeUpdateConsistentIndex(tx backend.UnsafeReadWriter, index uint64, term uint64) { + unsafeUpdateConsistentIndex(tx, index, term, false) +} + +func unsafeUpdateConsistentIndex(tx backend.UnsafeReadWriter, index uint64, term uint64, allowDecreasing bool) { + if index == 0 { + // Never save 0 as it means that we didn't load the real index yet. 
+		return
+	}
+	bs1 := make([]byte, 8)
+	binary.BigEndian.PutUint64(bs1, index)
+
+	if !allowDecreasing {
+		verify.Verify(func() {
+			previousIndex, _ := UnsafeReadConsistentIndex(tx)
+			if index < previousIndex {
+				panic(fmt.Errorf("update of consistent index not advancing: previous: %v new: %v", previousIndex, index))
+			}
+		})
+	}
+
+	// put the index into the underlying backend
+	// tx has been locked in TxnBegin, so there is no need to lock it again
+	tx.UnsafePut(Meta, MetaConsistentIndexKeyName, bs1)
+	if term > 0 {
+		bs2 := make([]byte, 8)
+		binary.BigEndian.PutUint64(bs2, term)
+		tx.UnsafePut(Meta, MetaTermKeyName, bs2)
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/confstate.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/confstate.go
similarity index 73%
rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/confstate.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/schema/confstate.go
index 3aa8c649b2e6b..c2bcb54013086 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/confstate.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/confstate.go
@@ -12,44 +12,40 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package membership
+package schema
import (
"encoding/json"
"log"
-	"go.etcd.io/etcd/raft/v3/raftpb"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
-	"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.uber.org/zap"
-)
-
-var (
-	confStateKey = []byte("confState")
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/raft/v3/raftpb"
)
// MustUnsafeSaveConfStateToBackend persists confState using given transaction (tx).
// confState in backend is persisted since etcd v3.5.
-func MustUnsafeSaveConfStateToBackend(lg *zap.Logger, tx backend.BatchTx, confState *raftpb.ConfState) {
+func MustUnsafeSaveConfStateToBackend(lg *zap.Logger, tx backend.UnsafeWriter, confState *raftpb.ConfState) {
confStateBytes, err := json.Marshal(confState)
if err != nil {
lg.Panic("Cannot marshal raftpb.ConfState", zap.Stringer("conf-state", confState), zap.Error(err))
}
-	tx.UnsafePut(buckets.Meta, confStateKey, confStateBytes)
+	tx.UnsafePut(Meta, MetaConfStateName, confStateBytes)
}
// UnsafeConfStateFromBackend retrieves ConfState from the backend.
// Returns nil if confState in backend is not persisted (e.g. backend written by etcd <=v3.4).
+// AssertNoV2StoreContent - depending on the deprecation stage, warns or reports an error
+// if the v2store contains custom content.
+func AssertNoV2StoreContent(lg *zap.Logger, st v2store.Store, deprecationStage config.V2DeprecationEnum) error {
+	metaOnly, err := membership.IsMetaStoreOnly(st)
+	if err != nil {
+		return err
+	}
+	if metaOnly {
+		return nil
+	}
+	if deprecationStage.IsAtLeast(config.V2Depr1WriteOnly) {
+		return fmt.Errorf("detected disallowed custom content in v2store for stage --v2-deprecation=%s", deprecationStage)
+	}
+	lg.Warn("detected custom v2store content. Etcd v3.5 is the last version allowing to access it using API v2. Please remove the content.")
+	return nil
+}
+
+// CreateConfigChangeEnts creates a series of Raft entries (i.e.
+// EntryConfChange) to remove the set of given IDs from the cluster. The ID
+// `self` is _not_ removed, even if present in the set.
+// If `self` is not inside the given ids, it creates a Raft entry to add a
+// default member with the given `self`.
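+// This is used, for example, when restarting a member with --force-new-cluster.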
+func CreateConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
+	found := false
+	for _, id := range ids {
+		if id == self {
+			found = true
+		}
+	}
+
+	var ents []raftpb.Entry
+	next := index + 1
+
+	// NB: always add self first, then remove other nodes. Raft will panic if the
+	// set of voters ever becomes empty.
+	if !found {
+		m := membership.Member{
+			ID:             types.ID(self),
+			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
+		}
+		ctx, err := json.Marshal(m)
+		if err != nil {
+			lg.Panic("failed to marshal member", zap.Error(err))
+		}
+		cc := &raftpb.ConfChange{
+			Type:    raftpb.ConfChangeAddNode,
+			NodeID:  self,
+			Context: ctx,
+		}
+		e := raftpb.Entry{
+			Type:  raftpb.EntryConfChange,
+			Data:  pbutil.MustMarshal(cc),
+			Term:  term,
+			Index: next,
+		}
+		ents = append(ents, e)
+		next++
+	}
+
+	for _, id := range ids {
+		if id == self {
+			continue
+		}
+		cc := &raftpb.ConfChange{
+			Type:   raftpb.ConfChangeRemoveNode,
+			NodeID: id,
+		}
+		e := raftpb.Entry{
+			Type:  raftpb.EntryConfChange,
+			Data:  pbutil.MustMarshal(cc),
+			Term:  term,
+			Index: next,
+		}
+		ents = append(ents, e)
+		next++
+	}
+
+	return ents
+}
+
+// GetEffectiveNodeIdsFromWalEntries returns an ordered set of IDs included in the given snapshot and
+// the entries.
+//
+// Deprecated: use GetEffectiveNodeIDsFromWALEntries instead.
+//
+//revive:disable-next-line:var-naming
+func GetEffectiveNodeIdsFromWalEntries(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
+	return GetEffectiveNodeIDsFromWALEntries(lg, snap, ents)
+}
+
+// GetEffectiveNodeIDsFromWALEntries returns an ordered set of IDs included in the given snapshot and
+// the entries. The given snapshot/entries can contain three kinds of
+// ID-related entry:
+// - ConfChangeAddNode, in which case the contained ID will be added into the set.
+// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
+// - ConfChangeAddLearnerNode, in which the contained ID will be added into the set.
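+// ConfChangeUpdateNode entries carry no membership change and leave the set untouched.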
+func GetEffectiveNodeIDsFromWALEntries(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
+	ids := make(map[uint64]bool)
+	if snap != nil {
+		for _, id := range snap.Metadata.ConfState.Voters {
+			ids[id] = true
+		}
+	}
+	for _, e := range ents {
+		if e.Type != raftpb.EntryConfChange {
+			continue
+		}
+		var cc raftpb.ConfChange
+		pbutil.MustUnmarshal(&cc, e.Data)
+		switch cc.Type {
+		case raftpb.ConfChangeAddLearnerNode:
+			ids[cc.NodeID] = true
+		case raftpb.ConfChangeAddNode:
+			ids[cc.NodeID] = true
+		case raftpb.ConfChangeRemoveNode:
+			delete(ids, cc.NodeID)
+		case raftpb.ConfChangeUpdateNode:
+			// do nothing
+		default:
+			lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
+		}
+	}
+	sids := make(types.Uint64Slice, 0, len(ids))
+	for id := range ids {
+		sids = append(sids, id)
+	}
+	sort.Sort(sids)
+	return sids
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/decoder.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/decoder.go
similarity index 66%
rename from vendor/go.etcd.io/etcd/server/v3/wal/decoder.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/decoder.go
index b8c68bef6d4f7..bdd4962e97f74 100644
--- a/vendor/go.etcd.io/etcd/server/v3/wal/decoder.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/decoder.go
@@ -16,6 +16,7 @@ package wal
import (
"encoding/binary"
+	"errors"
"fmt"
"hash"
"io"
@@ -24,8 +25,8 @@ import (
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/pkg/v3/crc"
"go.etcd.io/etcd/pkg/v3/pbutil"
-	"go.etcd.io/etcd/raft/v3/raftpb"
-	"go.etcd.io/etcd/server/v3/wal/walpb"
+	"go.etcd.io/etcd/server/v3/storage/wal/walpb"
+	"go.etcd.io/raft/v3/raftpb"
)
const minSectorSize = 512
@@ -33,6 +34,13 @@ const minSectorSize = 512
// frameSizeBytes is frame size in bytes, including record size and padding size.
const frameSizeBytes = 8
+type Decoder interface {
+	Decode(rec *walpb.Record) error
+	LastOffset() int64
+	LastCRC() uint32
+	UpdateCRC(prevCrc uint32)
+}
+
type decoder struct {
mu  sync.Mutex
brs []*fileutil.FileBufReader
@@ -40,20 +48,35 @@ type decoder struct {
// lastValidOff file offset following the last valid decoded record
lastValidOff int64
crc hash.Hash32
+
+	// continueOnCrcError - causes the decoder to continue working even in case of crc mismatch.
+	// This is a desired mode for tools performing inspection of corrupted WAL logs.
+	// See comments on the 'Decode' method for semantics.
+	continueOnCrcError bool
}
-func newDecoder(r ...fileutil.FileReader) *decoder {
+func NewDecoderAdvanced(continueOnCrcError bool, r ...fileutil.FileReader) Decoder {
readers := make([]*fileutil.FileBufReader, len(r))
for i := range r {
readers[i] = fileutil.NewFileBufReader(r[i])
}
return &decoder{
-		brs: readers,
-		crc: crc.New(0, crcTable),
+		brs:                readers,
+		crc:                crc.New(0, crcTable),
+		continueOnCrcError: continueOnCrcError,
}
}
+func NewDecoder(r ...fileutil.FileReader) Decoder {
+	return NewDecoderAdvanced(false, r...)
+}
+
+// Decode reads the next record out of the file.
+// In the success path, it fills 'rec' and returns nil.
+// When it fails, it returns err and usually resets 'rec' to the defaults.
+// When continueOnCrcError is set, the method may return ErrUnexpectedEOF or ErrCRCMismatch, but preserves the read
+// (potentially corrupted) record content.
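+// Callers that want fail-fast behavior should construct the decoder with NewDecoder instead.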
+func (d *decoder) Decode(rec *walpb.Record) error { rec.Reset() d.mu.Lock() defer d.mu.Unlock() @@ -67,7 +90,7 @@ func (d *decoder) decodeRecord(rec *walpb.Record) error { fileBufReader := d.brs[0] l, err := readInt64(fileBufReader) - if err == io.EOF || (err == nil && l == 0) { + if errors.Is(err, io.EOF) || (err == nil && l == 0) { // hit end of file or preallocated space d.brs = d.brs[1:] if len(d.brs) == 0 { @@ -84,7 +107,7 @@ func (d *decoder) decodeRecord(rec *walpb.Record) error { // The length of current WAL entry must be less than the remaining file size. maxEntryLimit := fileBufReader.FileInfo().Size() - d.lastValidOff - padBytes if recBytes > maxEntryLimit { - return fmt.Errorf("%w: [wal] max entry size limit exceeded when decoding %q, recBytes: %d, fileSize(%d) - offset(%d) - padBytes(%d) = entryLimit(%d)", + return fmt.Errorf("%w: [wal] max entry size limit exceeded when reading %q, recBytes: %d, fileSize(%d) - offset(%d) - padBytes(%d) = entryLimit(%d)", io.ErrUnexpectedEOF, fileBufReader.FileInfo().Name(), recBytes, fileBufReader.FileInfo().Size(), d.lastValidOff, padBytes, maxEntryLimit) } @@ -92,7 +115,7 @@ func (d *decoder) decodeRecord(rec *walpb.Record) error { if _, err = io.ReadFull(fileBufReader, data); err != nil { // ReadFull returns io.EOF only if no bytes were read // the decoder should treat this as an ErrUnexpectedEOF instead. - if err == io.EOF { + if errors.Is(err, io.EOF) { err = io.ErrUnexpectedEOF } return err @@ -104,14 +127,24 @@ func (d *decoder) decodeRecord(rec *walpb.Record) error { return err } - // skip crc checking if the record type is crcType - if rec.Type != crcType { - d.crc.Write(rec.Data) + // skip crc checking if the record type is CrcType + if rec.Type != CrcType { + _, err := d.crc.Write(rec.Data) + if err != nil { + return err + } if err := rec.Validate(d.crc.Sum32()); err != nil { + if !d.continueOnCrcError { + rec.Reset() + } else { + // If we continue, we want to update lastValidOff, such that following errors are consistent + defer func() { d.lastValidOff += frameSizeBytes + recBytes + padBytes }() + } + if d.isTornEntry(data) { - return io.ErrUnexpectedEOF + return fmt.Errorf("%w: in file '%s' at position: %d", io.ErrUnexpectedEOF, fileBufReader.FileInfo().Name(), d.lastValidOff) } - return err + return fmt.Errorf("%w: in file '%s' at position: %d", err, fileBufReader.FileInfo().Name(), d.lastValidOff) } } // record decoded as valid; point last valid offset to end of record @@ -139,7 +172,7 @@ func (d *decoder) isTornEntry(data []byte) bool { fileOff := d.lastValidOff + frameSizeBytes curOff := 0 - chunks := [][]byte{} + var chunks [][]byte // split data on sector boundaries for curOff < len(data) { chunkLen := int(minSectorSize - (fileOff % minSectorSize)) @@ -167,23 +200,23 @@ func (d *decoder) isTornEntry(data []byte) bool { return false } -func (d *decoder) updateCRC(prevCrc uint32) { +func (d *decoder) UpdateCRC(prevCrc uint32) { d.crc = crc.New(prevCrc, crcTable) } -func (d *decoder) lastCRC() uint32 { +func (d *decoder) LastCRC() uint32 { return d.crc.Sum32() } -func (d *decoder) lastOffset() int64 { return d.lastValidOff } +func (d *decoder) LastOffset() int64 { return d.lastValidOff } -func mustUnmarshalEntry(d []byte) raftpb.Entry { +func MustUnmarshalEntry(d []byte) raftpb.Entry { var e raftpb.Entry pbutil.MustUnmarshal(&e, d) return e } -func mustUnmarshalState(d []byte) raftpb.HardState { +func MustUnmarshalState(d []byte) raftpb.HardState { var s raftpb.HardState pbutil.MustUnmarshal(&s, d) return s diff --git 
a/vendor/go.etcd.io/etcd/server/v3/wal/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/doc.go similarity index 85% rename from vendor/go.etcd.io/etcd/server/v3/wal/doc.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/doc.go index 4288495461c21..0f7ef8527bea2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/wal/doc.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/doc.go @@ -13,11 +13,11 @@ // limitations under the License. /* -Package wal provides an implementation of a write ahead log that is used by +Package wal provides an implementation of write ahead log that is used by etcd. A WAL is created at a particular directory and is made up of a number of -segmented WAL files. Inside of each file the raft state and entries are appended +segmented WAL files. Inside each file the raft state and entries are appended to it with the Save method: metadata := []byte{} @@ -41,18 +41,18 @@ protobuf. The record protobuf contains a CRC, a type, and a data payload. The le record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32 value of all record protobufs preceding the current record. -WAL files are placed inside of the directory in the following format: +WAL files are placed inside the directory in the following format: $seq-$index.wal The first WAL file to be created will be 0000000000000000-0000000000000000.wal indicating an initial sequence of 0 and an initial raft index of 0. The first entry written to WAL MUST have raft index 0. -WAL will cut its current tail wal file if its size exceeds 64MB. This will increment an internal +WAL will cut its current tail wal file if its size exceeds 64 MB. This will increment an internal sequence number and cause a new file to be created. If the last raft index saved was 0x20 and this is the first time cut has been called on this WAL then the sequence will increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal. -If a second cut issues 0x10 entries with incremental index later then the file will be called: +If a second cut issues 0x10 entries with incremental index later, then the file will be called: 0000000000000002-0000000000000031.wal. At a later time a WAL can be opened at a particular snapshot. If there is no @@ -63,7 +63,7 @@ snapshot, an empty snapshot should be passed in. The snapshot must have been written to the WAL. -Additional items cannot be Saved to this WAL until all of the items from the given +Additional items cannot be Saved to this WAL until all the items from the given snapshot to the end of the WAL are read first: metadata, state, ents, err := w.ReadAll() diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/encoder.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/encoder.go similarity index 96% rename from vendor/go.etcd.io/etcd/server/v3/wal/encoder.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/encoder.go index 2642e156c889f..5944ba7b120f5 100644 --- a/vendor/go.etcd.io/etcd/server/v3/wal/encoder.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/encoder.go @@ -24,7 +24,7 @@ import ( "go.etcd.io/etcd/pkg/v3/crc" "go.etcd.io/etcd/pkg/v3/ioutil" - "go.etcd.io/etcd/server/v3/wal/walpb" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" ) // walPageBytes is the alignment for flushing records to the backing Writer. 
@@ -102,10 +102,8 @@ func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) { func (e *encoder) flush() error { e.mu.Lock() - n, err := e.bw.FlushN() - e.mu.Unlock() - walWriteBytes.Add(float64(n)) - return err + defer e.mu.Unlock() + return e.bw.Flush() } func prepareDataWithPadding(data []byte) ([]byte, uint64) { diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/file_pipeline.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/file_pipeline.go similarity index 94% rename from vendor/go.etcd.io/etcd/server/v3/wal/file_pipeline.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/file_pipeline.go index c2feba6c1dc80..c8ee4cce429f5 100644 --- a/vendor/go.etcd.io/etcd/server/v3/wal/file_pipeline.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/file_pipeline.go @@ -19,9 +19,9 @@ import ( "os" "path/filepath" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/fileutil" ) // filePipeline pipelines allocating disk space @@ -58,6 +58,7 @@ func newFilePipeline(lg *zap.Logger, dir string, fileSize int64) *filePipeline { // Open returns a fresh file for writing. Rename the file before calling // Open again or there will be file collisions. +// it will 'block' if the tmp file lock is already taken. func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) { select { case f = <-fp.filec: @@ -74,7 +75,7 @@ func (fp *filePipeline) Close() error { func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) { // count % 2 so this file isn't the same as the one last published fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2)) - if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil { + if f, err = createNewWALFile[*fileutil.LockedFile](fpath, false); err != nil { return nil, err } if err = fileutil.Preallocate(f.File, fp.size, true); err != nil { diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/metrics.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/wal/metrics.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/metrics.go diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/repair.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/repair.go similarity index 83% rename from vendor/go.etcd.io/etcd/server/v3/wal/repair.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/repair.go index dec2fa240b633..16277540f34d3 100644 --- a/vendor/go.etcd.io/etcd/server/v3/wal/repair.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/repair.go @@ -21,9 +21,10 @@ import ( "path/filepath" "time" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/server/v3/wal/walpb" "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/fileutil" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" ) // Repair tries to repair ErrUnexpectedEOF in the @@ -41,22 +42,22 @@ func Repair(lg *zap.Logger, dirpath string) bool { lg.Info("repairing", zap.String("path", f.Name())) rec := &walpb.Record{} - decoder := newDecoder(fileutil.NewFileReader(f.File)) + decoder := NewDecoder(fileutil.NewFileReader(f.File)) for { - lastOffset := decoder.lastOffset() - err := decoder.decode(rec) + lastOffset := decoder.LastOffset() + err := decoder.Decode(rec) switch { case err == nil: // update crc of the decoder when necessary switch rec.Type { - case crcType: - crc := decoder.crc.Sum32() + case CrcType: + crc := decoder.LastCRC() // current crc of decoder must match the crc of the record. 
// do no need to match 0 crc, since the decoder is a new one at this case. if crc != 0 && rec.Validate(crc) != nil { return false } - decoder.updateCRC(rec.Crc) + decoder.UpdateCRC(rec.Crc) } continue @@ -65,9 +66,10 @@ func Repair(lg *zap.Logger, dirpath string) bool { return true case errors.Is(err, io.ErrUnexpectedEOF): - bf, bferr := os.Create(f.Name() + ".broken") + brokenName := f.Name() + ".broken" + bf, bferr := createNewWALFile[*os.File](brokenName, true) if bferr != nil { - lg.Warn("failed to create backup file", zap.String("path", f.Name()+".broken"), zap.Error(bferr)) + lg.Warn("failed to create backup file", zap.String("path", brokenName), zap.Error(bferr)) return false } defer bf.Close() @@ -78,7 +80,7 @@ func Repair(lg *zap.Logger, dirpath string) bool { } if _, err = io.Copy(bf, f); err != nil { - lg.Warn("failed to copy", zap.String("from", f.Name()+".broken"), zap.String("to", f.Name()), zap.Error(err)) + lg.Warn("failed to copy", zap.String("from", f.Name()), zap.String("to", brokenName), zap.Error(err)) return false } diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/util.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/util.go similarity index 94% rename from vendor/go.etcd.io/etcd/server/v3/wal/util.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/util.go index 4a21ae6182fdb..8dec85c15d5a2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/wal/util.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/util.go @@ -19,9 +19,9 @@ import ( "fmt" "strings" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.uber.org/zap" + + "go.etcd.io/etcd/client/pkg/v3/fileutil" ) var errBadWALName = errors.New("bad wal name") @@ -72,16 +72,16 @@ func isValidSeq(lg *zap.Logger, names []string) bool { func readWALNames(lg *zap.Logger, dirpath string) ([]string, error) { names, err := fileutil.ReadDir(dirpath) if err != nil { - return nil, err + return nil, fmt.Errorf("[readWALNames] fileutil.ReadDir failed: %w", err) } - wnames := checkWalNames(lg, names) + wnames := checkWALNames(lg, names) if len(wnames) == 0 { return nil, ErrFileNotFound } return wnames, nil } -func checkWalNames(lg *zap.Logger, names []string) []string { +func checkWALNames(lg *zap.Logger, names []string) []string { wnames := make([]string, 0) for _, name := range names { if _, _, err := parseWALName(name); err != nil { diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/version.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/version.go new file mode 100644 index 0000000000000..8ed47c783d851 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/version.go @@ -0,0 +1,285 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package wal
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/coreos/go-semver/semver"
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
+
+	"go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/version"
+	"go.etcd.io/etcd/pkg/v3/pbutil"
+	"go.etcd.io/raft/v3/raftpb"
+)
+
+// ReadWALVersion reads remaining entries from an opened WAL and returns a struct
+// that implements the schema.WAL interface.
+func ReadWALVersion(w *WAL) (*walVersion, error) {
+	_, _, ents, err := w.ReadAll()
+	if err != nil {
+		return nil, err
+	}
+	return &walVersion{entries: ents}, nil
+}
+
+type walVersion struct {
+	entries []raftpb.Entry
+}
+
+// MinimalEtcdVersion returns the minimal etcd version able to interpret entries from the WAL log.
+func (w *walVersion) MinimalEtcdVersion() *semver.Version {
+	return MinimalEtcdVersion(w.entries)
+}
+
+// MinimalEtcdVersion returns the minimal etcd version able to interpret entries from the WAL log,
+// determined by looking at entries since the last snapshot and returning the highest
+// etcd version annotation from used messages, fields, enums and their values.
+func MinimalEtcdVersion(ents []raftpb.Entry) *semver.Version {
+	var maxVer *semver.Version
+	for _, ent := range ents {
+		err := visitEntry(ent, func(path protoreflect.FullName, ver *semver.Version) error {
+			maxVer = maxVersion(maxVer, ver)
+			return nil
+		})
+		if err != nil {
+			panic(err)
+		}
+	}
+	return maxVer
+}
+
+type Visitor func(path protoreflect.FullName, ver *semver.Version) error
+
+// VisitFileDescriptor calls visitor on each field and enum value with the etcd version read from the proto definition.
+// If a field/enum value is not annotated, visitor will be called with nil.
+// Upon encountering an invalid annotation, it will immediately exit with an error.
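+// The version annotations themselves are parsed out of the proto options by etcdVersionFromOptionsString below.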
+func VisitFileDescriptor(file protoreflect.FileDescriptor, visitor Visitor) error { + msgs := file.Messages() + for i := 0; i < msgs.Len(); i++ { + err := visitMessageDescriptor(msgs.Get(i), visitor) + if err != nil { + return err + } + } + enums := file.Enums() + for i := 0; i < enums.Len(); i++ { + err := visitEnumDescriptor(enums.Get(i), visitor) + if err != nil { + return err + } + } + return nil +} + +func visitEntry(ent raftpb.Entry, visitor Visitor) error { + err := visitMessage(proto.MessageReflect(&ent), visitor) + if err != nil { + return err + } + return visitEntryData(ent.Type, ent.Data, visitor) +} + +func visitEntryData(entryType raftpb.EntryType, data []byte, visitor Visitor) error { + var msg protoreflect.Message + switch entryType { + case raftpb.EntryNormal: + var raftReq etcdserverpb.InternalRaftRequest + if err := pbutil.Unmarshaler(&raftReq).Unmarshal(data); err != nil { + // try V2 Request + var r etcdserverpb.Request + if pbutil.Unmarshaler(&r).Unmarshal(data) != nil { + // return original error + return err + } + msg = proto.MessageReflect(&r) + break + } + msg = proto.MessageReflect(&raftReq) + if raftReq.ClusterVersionSet != nil { + ver, err := semver.NewVersion(raftReq.ClusterVersionSet.Ver) + if err != nil { + return err + } + err = visitor(msg.Descriptor().FullName(), ver) + if err != nil { + return err + } + } + case raftpb.EntryConfChange: + var confChange raftpb.ConfChange + err := pbutil.Unmarshaler(&confChange).Unmarshal(data) + if err != nil { + return nil + } + msg = proto.MessageReflect(&confChange) + return visitor(msg.Descriptor().FullName(), &version.V3_0) + case raftpb.EntryConfChangeV2: + var confChange raftpb.ConfChangeV2 + err := pbutil.Unmarshaler(&confChange).Unmarshal(data) + if err != nil { + return nil + } + msg = proto.MessageReflect(&confChange) + return visitor(msg.Descriptor().FullName(), &version.V3_4) + default: + panic("unhandled") + } + return visitMessage(msg, visitor) +} + +func visitMessageDescriptor(md protoreflect.MessageDescriptor, visitor Visitor) error { + err := visitDescriptor(md, visitor) + if err != nil { + return err + } + fields := md.Fields() + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + err = visitDescriptor(fd, visitor) + if err != nil { + return err + } + } + + enums := md.Enums() + for i := 0; i < enums.Len(); i++ { + err = visitEnumDescriptor(enums.Get(i), visitor) + if err != nil { + return err + } + } + return err +} + +func visitMessage(m protoreflect.Message, visitor Visitor) error { + md := m.Descriptor() + err := visitDescriptor(md, visitor) + if err != nil { + return err + } + m.Range(func(field protoreflect.FieldDescriptor, value protoreflect.Value) bool { + fd := md.Fields().Get(field.Index()) + err = visitDescriptor(fd, visitor) + if err != nil { + return false + } + + switch m := value.Interface().(type) { + case protoreflect.Message: + err = visitMessage(m, visitor) + case protoreflect.EnumNumber: + err = visitEnumNumber(fd.Enum(), m, visitor) + } + if err != nil { + return false + } + return true + }) + return err +} + +func visitEnumDescriptor(enum protoreflect.EnumDescriptor, visitor Visitor) error { + err := visitDescriptor(enum, visitor) + if err != nil { + return err + } + fields := enum.Values() + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + err = visitDescriptor(fd, visitor) + if err != nil { + return err + } + } + return err +} + +func visitEnumNumber(enum protoreflect.EnumDescriptor, number protoreflect.EnumNumber, visitor Visitor) error { + err := 
visitDescriptor(enum, visitor)
+	if err != nil {
+		return err
+	}
+	intNumber := int(number)
+	fields := enum.Values()
+	if intNumber >= fields.Len() || intNumber < 0 {
+		return fmt.Errorf("could not visit EnumNumber [%d]", intNumber)
+	}
+	return visitEnumValue(fields.Get(intNumber), visitor)
+}
+
+func visitEnumValue(enum protoreflect.EnumValueDescriptor, visitor Visitor) error {
+	valueOpts := enum.Options().(*descriptorpb.EnumValueOptions)
+	if valueOpts != nil {
+		ver, _ := etcdVersionFromOptionsString(valueOpts.String())
+		err := visitor(enum.FullName(), ver)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func visitDescriptor(md protoreflect.Descriptor, visitor Visitor) error {
+	opts, ok := md.Options().(fmt.Stringer)
+	if !ok {
+		return nil
+	}
+	ver, err := etcdVersionFromOptionsString(opts.String())
+	if err != nil {
+		return fmt.Errorf("%s: %w", md.FullName(), err)
+	}
+	return visitor(md.FullName(), ver)
+}
+
+func maxVersion(a *semver.Version, b *semver.Version) *semver.Version {
+	if a != nil && (b == nil || b.LessThan(*a)) {
+		return a
+	}
+	return b
+}
+
+func etcdVersionFromOptionsString(opts string) (*semver.Version, error) {
+	// TODO: Use proto.GetExtension when gogo/protobuf is usable with protoreflect
+	msgs := []string{"[versionpb.etcd_version_msg]:", "[versionpb.etcd_version_field]:", "[versionpb.etcd_version_enum]:", "[versionpb.etcd_version_enum_value]:"}
+	var end, index int
+	for _, msg := range msgs {
+		index = strings.Index(opts, msg)
+		end = index + len(msg)
+		if index != -1 {
+			break
+		}
+	}
+	if index == -1 {
+		return nil, nil
+	}
+	var verStr string
+	_, err := fmt.Sscanf(opts[end:], "%q", &verStr)
+	if err != nil {
+		return nil, err
+	}
+	if strings.Count(verStr, ".") == 1 {
+		verStr = verStr + ".0"
+	}
+	ver, err := semver.NewVersion(verStr)
+	if err != nil {
+		return nil, err
+	}
+	return ver, nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/wal.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/wal.go
similarity index 87%
rename from vendor/go.etcd.io/etcd/server/v3/wal/wal.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/wal.go
index db9007877066e..d26a4c20cb84a 100644
--- a/vendor/go.etcd.io/etcd/server/v3/wal/wal.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/wal.go
@@ -26,21 +26,21 @@ import (
"sync"
"time"
+	"go.uber.org/zap"
+
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/pkg/v3/pbutil"
-	"go.etcd.io/etcd/raft/v3"
-	"go.etcd.io/etcd/raft/v3/raftpb"
-	"go.etcd.io/etcd/server/v3/wal/walpb"
-
-	"go.uber.org/zap"
+	"go.etcd.io/etcd/server/v3/storage/wal/walpb"
+	"go.etcd.io/raft/v3"
+	"go.etcd.io/raft/v3/raftpb"
)
const (
-	metadataType int64 = iota + 1
-	entryType
-	stateType
-	crcType
-	snapshotType
+	MetadataType int64 = iota + 1
+	EntryType
+	StateType
+	CrcType
+	SnapshotType
// warnSyncDuration is the amount of time allotted to an fsync before
// logging a warning
@@ -56,7 +56,7 @@ var (
ErrMetadataConflict = errors.New("wal: conflicting metadata found")
ErrFileNotFound     = errors.New("wal: file not found")
-	ErrCRCMismatch      = errors.New("wal: crc mismatch")
+	ErrCRCMismatch      = walpb.ErrCRCMismatch
ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
ErrSnapshotNotFound = errors.New("wal: snapshot not found")
ErrSliceOutOfRange  = errors.New("wal: slice bounds out of range")
@@ -81,8 +81,8 @@ type WAL struct {
state raftpb.HardState // hardstate recorded at the head of WAL
start walpb.Snapshot // snapshot to start reading
-	decoder   *decoder // decoder to decode records
-	readClose func() error //
closer for decode reader + decoder Decoder // decoder to Decode records + readClose func() error // closer for Decode reader unsafeNoSync bool // if set, do not fsync @@ -126,7 +126,7 @@ func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) { } p := filepath.Join(tmpdirpath, walName(0, 0)) - f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode) + f, err := createNewWALFile[*fileutil.LockedFile](p, false) if err != nil { lg.Warn( "failed to flock an initial WAL file", @@ -166,7 +166,7 @@ func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) { if err = w.saveCrc(0); err != nil { return nil, err } - if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil { + if err = w.encoder.encode(&walpb.Record{Type: MetadataType, Data: metadata}); err != nil { return nil, err } if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil { @@ -233,6 +233,39 @@ func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) { return w, nil } +// createNewWALFile creates a WAL file. +// To create a locked file, use *fileutil.LockedFile type parameter. +// To create a standard file, use *os.File type parameter. +// If forceNew is true, the file will be truncated if it already exists. +func createNewWALFile[T *os.File | *fileutil.LockedFile](path string, forceNew bool) (T, error) { + flag := os.O_WRONLY | os.O_CREATE + if forceNew { + flag |= os.O_TRUNC + } + + if _, isLockedFile := any(T(nil)).(*fileutil.LockedFile); isLockedFile { + lockedFile, err := fileutil.LockFile(path, flag, fileutil.PrivateFileMode) + if err != nil { + return nil, err + } + return any(lockedFile).(T), nil + } + + file, err := os.OpenFile(path, flag, fileutil.PrivateFileMode) + if err != nil { + return nil, err + } + return any(file).(T), nil +} + +func (w *WAL) Reopen(lg *zap.Logger, snap walpb.Snapshot) (*WAL, error) { + err := w.Close() + if err != nil { + lg.Panic("failed to close WAL during reopen", zap.Error(err)) + } + return Open(lg, w.dir, snap) +} + func (w *WAL) SetUnsafeNoFsync() { w.unsafeNoSync = true } @@ -264,7 +297,8 @@ func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) { // but there is a window between the fork and the exec where another // process holds the lock. 
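// If the rename fails with an *os.LinkError, renameWALUnlock below is used as the fallback.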
if err := os.Rename(tmpdirpath, w.dir); err != nil { - if _, ok := err.(*os.LinkError); ok { + var linkErr *os.LinkError + if errors.As(err, &linkErr) { return w.renameWALUnlock(tmpdirpath) } return nil, err @@ -310,10 +344,10 @@ func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) { func Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) { w, err := openAtIndex(lg, dirpath, snap, true) if err != nil { - return nil, err + return nil, fmt.Errorf("openAtIndex failed: %w", err) } if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil { - return nil, err + return nil, fmt.Errorf("fileutil.OpenDir failed: %w", err) } return w, nil } @@ -330,12 +364,12 @@ func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool } names, nameIndex, err := selectWALFiles(lg, dirpath, snap) if err != nil { - return nil, err + return nil, fmt.Errorf("[openAtIndex] selectWALFiles failed: %w", err) } rs, ls, closer, err := openWALFiles(lg, dirpath, names, nameIndex, write) if err != nil { - return nil, err + return nil, fmt.Errorf("[openAtIndex] openWALFiles failed: %w", err) } // create a WAL ready for reading @@ -343,7 +377,7 @@ func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool lg: lg, dir: dirpath, start: snap, - decoder: newDecoder(rs...), + decoder: NewDecoder(rs...), readClose: closer, locks: ls, } @@ -354,7 +388,7 @@ func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool w.readClose = nil if _, _, err := parseWALName(filepath.Base(w.tail().Name())); err != nil { closer() - return nil, err + return nil, fmt.Errorf("[openAtIndex] parseWALName failed: %w", err) } w.fp = newFilePipeline(lg, w.dir, SegmentSizeBytes) } @@ -365,13 +399,16 @@ func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool func selectWALFiles(lg *zap.Logger, dirpath string, snap walpb.Snapshot) ([]string, int, error) { names, err := readWALNames(lg, dirpath) if err != nil { - return nil, -1, err + return nil, -1, fmt.Errorf("readWALNames failed: %w", err) } nameIndex, ok := searchIndex(lg, names, snap.Index) - if !ok || !isValidSeq(lg, names[nameIndex:]) { - err = ErrFileNotFound - return nil, -1, err + if !ok { + return nil, -1, fmt.Errorf("wal: file not found which matches the snapshot index '%d'", snap.Index) + } + + if !isValidSeq(lg, names[nameIndex:]) { + return nil, -1, fmt.Errorf("wal: file sequence numbers (starting from %d) do not increase continuously", nameIndex) } return names, nameIndex, nil @@ -388,7 +425,7 @@ func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode) if err != nil { closeAll(lg, rcs...) - return nil, nil, nil, err + return nil, nil, nil, fmt.Errorf("[openWALFiles] fileutil.TryLockFile failed: %w", err) } ls = append(ls, l) rcs = append(rcs, l) @@ -397,7 +434,7 @@ func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode) if err != nil { closeAll(lg, rcs...) - return nil, nil, nil, err + return nil, nil, nil, fmt.Errorf("[openWALFiles] os.OpenFile failed (%q): %w", p, err) } ls = append(ls, nil) rcs = append(rcs, rf) @@ -427,7 +464,7 @@ func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, // exists in the log). Such a situation can happen in cases described in figure 7. of the // RAFT paper (http://web.stanford.edu/~ouster/cgi-bin/papers/raft-atc14.pdf). 
// -// ReadAll may return uncommitted yet entries, that are subject to be overriden. +// ReadAll may return uncommitted yet entries, that are subject to be overridden. // Do not apply entries that have index > state.commit, as they are subject to change. func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) { w.mu.Lock() @@ -441,10 +478,10 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. decoder := w.decoder var match bool - for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) { + for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) { switch rec.Type { - case entryType: - e := mustUnmarshalEntry(rec.Data) + case EntryType: + e := MustUnmarshalEntry(rec.Data) // 0 <= e.Index-w.start.Index - 1 < len(ents) if e.Index > w.start.Index { // prevent "panic: runtime error: slice bounds out of range [:13038096702221461992] with capacity 0" @@ -458,27 +495,27 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. } w.enti = e.Index - case stateType: - state = mustUnmarshalState(rec.Data) + case StateType: + state = MustUnmarshalState(rec.Data) - case metadataType: + case MetadataType: if metadata != nil && !bytes.Equal(metadata, rec.Data) { state.Reset() return nil, state, nil, ErrMetadataConflict } metadata = rec.Data - case crcType: - crc := decoder.crc.Sum32() + case CrcType: + crc := decoder.LastCRC() // current crc of decoder must match the crc of the record. // do no need to match 0 crc, since the decoder is a new one at this case. if crc != 0 && rec.Validate(crc) != nil { state.Reset() return nil, state, nil, ErrCRCMismatch } - decoder.updateCRC(rec.Crc) + decoder.UpdateCRC(rec.Crc) - case snapshotType: + case SnapshotType: var snap walpb.Snapshot pbutil.MustUnmarshal(&snap, rec.Data) if snap.Index == w.start.Index { @@ -499,7 +536,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. case nil: // We do not have to read out all entries in read mode. // The last record maybe a partial written one, so - // ErrunexpectedEOF might be returned. + // `io.ErrUnexpectedEOF` might be returned. if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) { state.Reset() return nil, state, nil, err @@ -516,7 +553,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. // not all, will cause CRC errors on WAL open. Since the records // were never fully synced to disk in the first place, it's safe // to zero them out to avoid any CRC errors from new writes. - if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil { + if _, err = w.tail().Seek(w.decoder.LastOffset(), io.SeekStart); err != nil { return nil, state, nil, err } if err = fileutil.ZeroToEnd(w.tail().File); err != nil { @@ -540,9 +577,9 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. if w.tail() != nil { // create encoder (chain crc with the decoder), enable appending - w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC()) + w.encoder, err = newFileEncoder(w.tail().File, w.decoder.LastCRC()) if err != nil { - return + return nil, state, nil, err } } w.decoder = nil @@ -576,24 +613,24 @@ func ValidSnapshotEntries(lg *zap.Logger, walDir string) ([]walpb.Snapshot, erro }() // create a new decoder from the readers on the WAL files - decoder := newDecoder(rs...) + decoder := NewDecoder(rs...) 
- for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) { + for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) { switch rec.Type { - case snapshotType: + case SnapshotType: var loadedSnap walpb.Snapshot pbutil.MustUnmarshal(&loadedSnap, rec.Data) snaps = append(snaps, loadedSnap) - case stateType: - state = mustUnmarshalState(rec.Data) - case crcType: - crc := decoder.crc.Sum32() + case StateType: + state = MustUnmarshalState(rec.Data) + case CrcType: + crc := decoder.LastCRC() // current crc of decoder must match the crc of the record. // do no need to match 0 crc, since the decoder is a new one at this case. if crc != 0 && rec.Validate(crc) != nil { return nil, ErrCRCMismatch } - decoder.updateCRC(rec.Crc) + decoder.UpdateCRC(rec.Crc) } } // We do not have to read out all the WAL entries @@ -650,24 +687,24 @@ func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) (*raftpb.HardSta }() // create a new decoder from the readers on the WAL files - decoder := newDecoder(rs...) + decoder := NewDecoder(rs...) - for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) { + for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) { switch rec.Type { - case metadataType: + case MetadataType: if metadata != nil && !bytes.Equal(metadata, rec.Data) { return nil, ErrMetadataConflict } metadata = rec.Data - case crcType: - crc := decoder.crc.Sum32() + case CrcType: + crc := decoder.LastCRC() // Current crc of decoder must match the crc of the record. // We need not match 0 crc, since the decoder is a new one at this point. if crc != 0 && rec.Validate(crc) != nil { return nil, ErrCRCMismatch } - decoder.updateCRC(rec.Crc) - case snapshotType: + decoder.UpdateCRC(rec.Crc) + case SnapshotType: var loadedSnap walpb.Snapshot pbutil.MustUnmarshal(&loadedSnap, rec.Data) if loadedSnap.Index == snap.Index { @@ -678,8 +715,8 @@ func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) (*raftpb.HardSta } // We ignore all entry and state type records as these // are not necessary for validating the WAL contents - case entryType: - case stateType: + case EntryType: + case StateType: pbutil.MustUnmarshal(&state, rec.Data) default: return nil, fmt.Errorf("unexpected block type %d", rec.Type) @@ -737,7 +774,7 @@ func (w *WAL) cut() error { return err } - if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil { + if err = w.encoder.encode(&walpb.Record{Type: MetadataType, Data: w.metadata}); err != nil { return err } @@ -894,7 +931,7 @@ func (w *WAL) Close() error { func (w *WAL) saveEntry(e *raftpb.Entry) error { // TODO: add MustMarshalTo to reduce one allocation. 
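// (pbutil.MustMarshal allocates a fresh buffer for every entry that is saved.)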
b := pbutil.MustMarshal(e)
-	rec := &walpb.Record{Type: entryType, Data: b}
+	rec := &walpb.Record{Type: EntryType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
@@ -908,7 +945,7 @@ func (w *WAL) saveState(s *raftpb.HardState) error {
}
w.state = *s
b := pbutil.MustMarshal(s)
-	rec := &walpb.Record{Type: stateType, Data: b}
+	rec := &walpb.Record{Type: StateType, Data: b}
return w.encoder.encode(rec)
}
@@ -960,7 +997,7 @@ func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
w.mu.Lock()
defer w.mu.Unlock()
-	rec := &walpb.Record{Type: snapshotType, Data: b}
+	rec := &walpb.Record{Type: SnapshotType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
@@ -972,7 +1009,7 @@ func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
}
func (w *WAL) saveCrc(prevCrc uint32) error {
-	return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
+	return w.encoder.encode(&walpb.Record{Type: CrcType, Crc: prevCrc})
}
func (w *WAL) tail() *fileutil.LockedFile {
diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.go
similarity index 91%
rename from vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.go
index e2070fbba3b0f..693deab113d6a 100644
--- a/vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.go
@@ -14,7 +14,10 @@
package walpb
-import "errors"
+import (
+	"errors"
+	"fmt"
+)
var (
ErrCRCMismatch = errors.New("walpb: crc mismatch")
@@ -24,8 +27,7 @@ func (rec *Record) Validate(crc uint32) error {
if rec.Crc == crc {
return nil
}
-	rec.Reset()
-	return ErrCRCMismatch
+	return fmt.Errorf("%w: expected: %x computed: %x", ErrCRCMismatch, rec.Crc, crc)
}
// ValidateSnapshotForWrite ensures that the newly written snapshot is valid.
diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.pb.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.pb.go
similarity index 89%
rename from vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.pb.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.pb.go
index 654d82636c095..5605fbdaa310a 100644
--- a/vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.pb.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.pb.go
@@ -11,7 +11,7 @@ import (
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/golang/protobuf/proto"
-	raftpb "go.etcd.io/etcd/raft/v3/raftpb"
+	raftpb "go.etcd.io/raft/v3/raftpb"
)
// Reference imports to suppress errors if they are not otherwise used.
@@ -119,22 +119,24 @@ func init() { func init() { proto.RegisterFile("record.proto", fileDescriptor_bf94fd919e302a1d) } var fileDescriptor_bf94fd919e302a1d = []byte{ - // 234 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8e, 0x41, 0x4e, 0xc3, 0x30, - 0x10, 0x45, 0x63, 0xe2, 0x22, 0x18, 0xca, 0x02, 0xab, 0xaa, 0xa2, 0x2c, 0x4c, 0xd4, 0x55, 0x56, - 0x29, 0xe2, 0x08, 0x65, 0xcf, 0x22, 0x3d, 0x00, 0x72, 0x1d, 0xa7, 0x20, 0xd1, 0x8c, 0x35, 0xb5, - 0x04, 0xdc, 0x84, 0x23, 0x65, 0xc9, 0x09, 0x10, 0x84, 0x8b, 0xa0, 0x8c, 0x03, 0x1b, 0xfb, 0xeb, - 0x7d, 0xf9, 0x7d, 0xc3, 0x9c, 0x9c, 0x45, 0x6a, 0x2a, 0x4f, 0x18, 0x50, 0xcd, 0x5e, 0xcc, 0xb3, - 0xdf, 0xe5, 0x8b, 0x3d, 0xee, 0x91, 0xc9, 0x7a, 0x4c, 0xb1, 0xcc, 0x97, 0x64, 0xda, 0xb0, 0x1e, - 0x0f, 0xbf, 0xe3, 0x2b, 0xf2, 0xd5, 0x3d, 0x9c, 0xd6, 0x2c, 0x51, 0x19, 0xc8, 0xf0, 0xe6, 0x5d, - 0x26, 0x0a, 0x51, 0xa6, 0x1b, 0xd9, 0x7f, 0x5e, 0x27, 0x35, 0x13, 0xb5, 0x84, 0xd4, 0x92, 0xcd, - 0x4e, 0x0a, 0x51, 0x5e, 0x4e, 0xc5, 0x08, 0x94, 0x02, 0xd9, 0x98, 0x60, 0xb2, 0xb4, 0x10, 0xe5, - 0xbc, 0xe6, 0xbc, 0x22, 0x38, 0xdb, 0x76, 0xc6, 0x1f, 0x1f, 0x31, 0xa8, 0x1c, 0x66, 0x4f, 0x5d, - 0xe3, 0x5e, 0x59, 0x29, 0xa7, 0x97, 0x11, 0xf1, 0x9a, 0xa3, 0x03, 0x4b, 0xe5, 0xff, 0x9a, 0xa3, - 0x83, 0xba, 0x01, 0xb0, 0xd8, 0xb5, 0x0f, 0xc7, 0x60, 0x82, 0x63, 0xf7, 0xc5, 0xed, 0x55, 0x15, - 0x7f, 0x5e, 0xdd, 0x61, 0xd7, 0x6e, 0xc7, 0xa2, 0x3e, 0xb7, 0x7f, 0x71, 0xb3, 0xe8, 0xbf, 0x75, - 0xd2, 0x0f, 0x5a, 0x7c, 0x0c, 0x5a, 0x7c, 0x0d, 0x5a, 0xbc, 0xff, 0xe8, 0xe4, 0x37, 0x00, 0x00, - 0xff, 0xff, 0xc3, 0x36, 0x0c, 0xad, 0x1d, 0x01, 0x00, 0x00, + // 266 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x90, 0x41, 0x4e, 0xc3, 0x30, + 0x14, 0x44, 0x63, 0x92, 0x22, 0x30, 0x65, 0x51, 0x0b, 0xa1, 0x28, 0x8b, 0x10, 0x75, 0x15, 0x09, + 0x29, 0x46, 0x70, 0x02, 0xca, 0x9e, 0x45, 0xba, 0x63, 0x83, 0x5c, 0xe7, 0x27, 0x54, 0x6a, 0xf3, + 0xad, 0x1f, 0xab, 0x85, 0x9b, 0x70, 0xa4, 0x2c, 0x39, 0x01, 0x82, 0x70, 0x11, 0x64, 0xa7, 0xb0, + 0xfa, 0xa3, 0x37, 0x9a, 0x19, 0xcb, 0x7c, 0x4a, 0xa0, 0x91, 0xaa, 0xc2, 0x10, 0x5a, 0x14, 0x93, + 0xbd, 0xda, 0x98, 0x55, 0x72, 0xd1, 0x60, 0x83, 0x9e, 0x48, 0xa7, 0x46, 0x33, 0x99, 0x91, 0xaa, + 0xad, 0x59, 0x49, 0x77, 0x46, 0x34, 0x7f, 0xe4, 0xc7, 0xa5, 0xcf, 0x8b, 0x98, 0x47, 0xf6, 0xcd, + 0x40, 0xcc, 0x32, 0x96, 0x87, 0x8b, 0xa8, 0xff, 0xbc, 0x0a, 0x4a, 0x4f, 0xc4, 0x25, 0x0f, 0x35, + 0xe9, 0xf8, 0x28, 0x63, 0xf9, 0xf9, 0xc1, 0x70, 0x40, 0x08, 0x1e, 0x55, 0xca, 0xaa, 0x38, 0xcc, + 0x58, 0x3e, 0x2d, 0xbd, 0x9e, 0x13, 0x3f, 0x59, 0xb6, 0xca, 0x74, 0x2f, 0x68, 0x45, 0xc2, 0x27, + 0xeb, 0xb6, 0x82, 0x57, 0x5f, 0x19, 0x1d, 0x92, 0x23, 0xf2, 0x6b, 0x40, 0x5b, 0x5f, 0x1a, 0xfd, + 0xaf, 0x01, 0x6d, 0xc5, 0x0d, 0xe7, 0x1a, 0xdb, 0xfa, 0xb9, 0xb3, 0xca, 0x82, 0xef, 0x3e, 0xbb, + 0x9d, 0x15, 0xe3, 0xcb, 0x8b, 0x07, 0x6c, 0xeb, 0xa5, 0x33, 0xca, 0x53, 0xfd, 0x27, 0x17, 0xf7, + 0xfd, 0x77, 0x1a, 0xf4, 0x43, 0xca, 0x3e, 0x86, 0x94, 0x7d, 0x0d, 0x29, 0x7b, 0xff, 0x49, 0x83, + 0xa7, 0xeb, 0x06, 0x0b, 0xb0, 0xba, 0x2a, 0xd6, 0x28, 0xdd, 0x95, 0x1d, 0xd0, 0x0e, 0x48, 0xee, + 0xee, 0x64, 0x67, 0x91, 0x54, 0x03, 0x72, 0xaf, 0x36, 0xd2, 0xff, 0xd7, 0x6f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xcf, 0xa9, 0xf0, 0x02, 0x45, 0x01, 0x00, 0x00, } func (m *Record) Marshal() (dAtA []byte, err error) { diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.proto b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.proto similarity index 87% rename from 
vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.proto
rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.proto
index 536fa6c19c120..e1050fde48167 100644
--- a/vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.proto
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.proto
@@ -2,7 +2,9 @@ syntax = "proto2";
package walpb;
import "gogoproto/gogo.proto";
-import "raft/raftpb/raft.proto";
+import "raftpb/raft.proto";
+
+option go_package = "go.etcd.io/etcd/server/v3/storage/wal/walpb";
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
diff --git a/vendor/go.etcd.io/etcd/server/v3/verify/verify.go b/vendor/go.etcd.io/etcd/server/v3/verify/verify.go
index ef613d7eb05e7..0dc99afc819f1 100644
--- a/vendor/go.etcd.io/etcd/server/v3/verify/verify.go
+++ b/vendor/go.etcd.io/etcd/server/v3/verify/verify.go
@@ -16,19 +16,20 @@ package verify
import (
"fmt"
-	"os"
-
-	"go.etcd.io/etcd/raft/v3/raftpb"
-	"go.etcd.io/etcd/server/v3/datadir"
-	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
-	wal2 "go.etcd.io/etcd/server/v3/wal"
-	"go.etcd.io/etcd/server/v3/wal/walpb"
+	"go.uber.org/zap"
+
+	"go.etcd.io/etcd/client/pkg/v3/fileutil"
+	"go.etcd.io/etcd/client/pkg/v3/verify"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/etcd/server/v3/storage/datadir"
+	"go.etcd.io/etcd/server/v3/storage/schema"
+	wal2 "go.etcd.io/etcd/server/v3/storage/wal"
+	"go.etcd.io/etcd/server/v3/storage/wal/walpb"
+	"go.etcd.io/raft/v3/raftpb"
)
-const ENV_VERIFY = "ETCD_VERIFY"
-const ENV_VERIFY_ALL_VALUE = "all"
+const envVerifyValueStorageWAL verify.VerificationType = "storage_wal"
type Config struct {
// DataDir is a root directory where the data being verified are stored.
@@ -47,19 +48,23 @@ type Config struct {
// the function can also panic.
// The function is expected to work on a not-in-use data model, i.e.
// no file-locks should be taken. Verify does not modify the data.
-func Verify(cfg Config) error {
+func Verify(cfg Config) (retErr error) {
lg := cfg.Logger
if lg == nil {
lg = zap.NewNop()
}
-	var err error
+	if !fileutil.Exist(datadir.ToBackendFileName(cfg.DataDir)) {
+		lg.Info("verification skipped due to non exist db file")
+		return nil
+	}
+
+	lg.Info("verification of persisted state", zap.String("data-dir", cfg.DataDir))
defer func() {
-		if err != nil {
+		if retErr != nil {
lg.Error("verification of persisted state failed",
zap.String("data-dir", cfg.DataDir),
-				zap.Error(err))
+				zap.Error(retErr))
} else if r := recover(); r != nil {
lg.Error("verification of persisted state failed",
zap.String("data-dir", cfg.DataDir))
@@ -69,14 +74,10 @@ func Verify(cfg Config) error {
}
}()
-	beConfig := backend.DefaultBackendConfig()
-	beConfig.Path = datadir.ToBackendFileName(cfg.DataDir)
-	beConfig.Logger = cfg.Logger
-
-	be := backend.New(beConfig)
+	be := backend.NewDefaultBackend(lg, datadir.ToBackendFileName(cfg.DataDir))
defer be.Close()
-	snapshot, hardstate, err := validateWal(cfg)
+	snapshot, hardstate, err := validateWAL(cfg)
if err != nil {
return err
}
@@ -90,7 +91,7 @@ func Verify(cfg Config) error {
// VerifyIfEnabled performs verification according to ETCD_VERIFY env settings.
// See Verify for more information.
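// Whether verification runs is still controlled by the ETCD_VERIFY environment variable, now checked through verify.IsVerificationEnabled.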
func VerifyIfEnabled(cfg Config) error { - if os.Getenv(ENV_VERIFY) == ENV_VERIFY_ALL_VALUE { + if verify.IsVerificationEnabled(envVerifyValueStorageWAL) { return Verify(cfg) } return nil @@ -108,7 +109,7 @@ func MustVerifyIfEnabled(cfg Config) { } func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error { - index, term := cindex.ReadConsistentIndex(be.ReadTx()) + index, term := schema.ReadConsistentIndex(be.ReadTx()) if cfg.ExactIndex && index != hardstate.Commit { return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit) } @@ -130,8 +131,8 @@ func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot * return nil } -func validateWal(cfg Config) (*walpb.Snapshot, *raftpb.HardState, error) { - walDir := datadir.ToWalDir(cfg.DataDir) +func validateWAL(cfg Config) (*walpb.Snapshot, *raftpb.HardState, error) { + walDir := datadir.ToWALDir(cfg.DataDir) walSnaps, err := wal2.ValidSnapshotEntries(cfg.Logger, walDir) if err != nil { diff --git a/vendor/go.etcd.io/raft/v3/.gitignore b/vendor/go.etcd.io/raft/v3/.gitignore new file mode 100644 index 0000000000000..485dee64bcfb4 --- /dev/null +++ b/vendor/go.etcd.io/raft/v3/.gitignore @@ -0,0 +1 @@ +.idea diff --git a/vendor/go.etcd.io/raft/v3/.golangci.yaml b/vendor/go.etcd.io/raft/v3/.golangci.yaml new file mode 100644 index 0000000000000..d169aa4e7fab0 --- /dev/null +++ b/vendor/go.etcd.io/raft/v3/.golangci.yaml @@ -0,0 +1,41 @@ +run: + timeout: 30m + skip-files: + - "^zz_generated.*" + +issues: + max-same-issues: 0 + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + # exclude ineffassign linter for generated files for conversion + - path: conversion\.go + linters: + - ineffassign + +linters: + disable-all: true + enable: # please keep this alphabetized + # Don't use soon-to-be-deprecated[1] linters that lead to false positives + # https://github.com/golangci/golangci-lint/issues/1841 + # - deadcode + # - structcheck + # - varcheck + - goimports + - ineffassign + - revive + - staticcheck + - stylecheck + - unused + - unconvert # Remove unnecessary type conversions + +linters-settings: # please keep this alphabetized + goimports: + local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages. + staticcheck: + checks: + - "all" + - "-SA1019" # TODO(fix) Using a deprecated function, variable, constant or field + - "-SA2002" # TODO(fix) Called testing.T.FailNow or SkipNow in a goroutine, which isn’t allowed + stylecheck: + checks: + - "ST1019" # Importing the same package multiple times.
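The verify.go change above replaces a raw string comparison against ETCD_VERIFY with a typed gate, verify.IsVerificationEnabled, so that individual verification classes such as "storage_wal" can be toggled independently of "all". A minimal, self-contained Go sketch of that pattern follows; the env-var parsing shown here is an illustrative assumption, not the exact implementation of the vendored go.etcd.io/etcd/client/pkg/v3/verify package.

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    // VerificationType names one class of expensive self-checks.
    type VerificationType string

    const verifyAll VerificationType = "all"

    // isVerificationEnabled reports whether the given verification class was
    // requested via the ETCD_VERIFY environment variable: "all" enables every
    // class, otherwise the value must name the class exactly (assumed semantics).
    func isVerificationEnabled(vt VerificationType) bool {
        env := VerificationType(strings.ToLower(os.Getenv("ETCD_VERIFY")))
        return env == verifyAll || env == vt
    }

    func main() {
        os.Setenv("ETCD_VERIFY", "storage_wal")
        fmt.Println(isVerificationEnabled("storage_wal")) // true
        fmt.Println(isVerificationEnabled("compact"))     // false
    }

One benefit of the typed constant over the removed ENV_VERIFY/ENV_VERIFY_ALL_VALUE pair is that each subsystem declares its own VerificationType value, so new checks can be gated without growing a shared list of string constants.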
diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/go.etcd.io/raft/v3/LICENSE similarity index 100% rename from vendor/google.golang.org/genproto/LICENSE rename to vendor/go.etcd.io/raft/v3/LICENSE diff --git a/vendor/go.etcd.io/raft/v3/Makefile b/vendor/go.etcd.io/raft/v3/Makefile new file mode 100644 index 0000000000000..3ae9d6a3c2821 --- /dev/null +++ b/vendor/go.etcd.io/raft/v3/Makefile @@ -0,0 +1,28 @@ +GO_TEST_FLAGS?= + +.PHONY: verify +verify: verify-gofmt verify-dep verify-lint verify-mod-tidy verify-genproto + +.PHONY: verify-gofmt +verify-gofmt: + PASSES="gofmt" ./scripts/test.sh + +.PHONY: verify-dep +verify-dep: + PASSES="dep" ./scripts/test.sh + +.PHONY: verify-lint +verify-lint: + golangci-lint run + +.PHONY: verify-mod-tidy +verify-mod-tidy: + PASSES="mod_tidy" ./scripts/test.sh + +.PHONY: verify-genproto +verify-genproto: + PASSES="genproto" ./scripts/test.sh + +.PHONY: test +test: + PASSES="unit" ./scripts/test.sh $(GO_TEST_FLAGS) diff --git a/vendor/go.etcd.io/raft/v3/OWNERS b/vendor/go.etcd.io/raft/v3/OWNERS new file mode 100644 index 0000000000000..f57ce1f7930cd --- /dev/null +++ b/vendor/go.etcd.io/raft/v3/OWNERS @@ -0,0 +1,12 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - ahrtr # Benjamin Wang + - mitake # Hitoshi Mitake + - serathius # Marek Siarkowicz + - ptabor # Piotr Tabor + - spzala # Sahdev Zala +reviewers: + - pav-kv # Pavel Kalinnikov +emeritus_approvers: + - tbg # Tobias Grieger \ No newline at end of file diff --git a/vendor/go.etcd.io/etcd/raft/v3/README.md b/vendor/go.etcd.io/raft/v3/README.md similarity index 99% rename from vendor/go.etcd.io/etcd/raft/v3/README.md rename to vendor/go.etcd.io/raft/v3/README.md index fbd8b4d49b5ed..09a56efa62455 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/README.md +++ b/vendor/go.etcd.io/raft/v3/README.md @@ -198,4 +198,4 @@ This approach introduces a problem when removing a member from a two-member clus ## Go docs -More detailed development documentation can be found in go docs: https://pkg.go.dev/go.etcd.io/etcd/raft/v3. \ No newline at end of file +More detailed development documentation can be found in go docs: https://pkg.go.dev/go.etcd.io/raft/v3. \ No newline at end of file diff --git a/vendor/go.etcd.io/etcd/raft/v3/bootstrap.go b/vendor/go.etcd.io/raft/v3/bootstrap.go similarity index 98% rename from vendor/go.etcd.io/etcd/raft/v3/bootstrap.go rename to vendor/go.etcd.io/raft/v3/bootstrap.go index 824bd5f51bcd6..2a61aa23e473b 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/bootstrap.go +++ b/vendor/go.etcd.io/raft/v3/bootstrap.go @@ -17,7 +17,7 @@ package raft import ( "errors" - pb "go.etcd.io/etcd/raft/v3/raftpb" + pb "go.etcd.io/raft/v3/raftpb" ) // Bootstrap initializes the RawNode for first use by appending configuration diff --git a/vendor/go.etcd.io/etcd/raft/v3/confchange/confchange.go b/vendor/go.etcd.io/raft/v3/confchange/confchange.go similarity index 87% rename from vendor/go.etcd.io/etcd/raft/v3/confchange/confchange.go rename to vendor/go.etcd.io/raft/v3/confchange/confchange.go index dddbcc9d9b57e..55db16a8aeed2 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/confchange/confchange.go +++ b/vendor/go.etcd.io/raft/v3/confchange/confchange.go @@ -19,9 +19,9 @@ import ( "fmt" "strings" - "go.etcd.io/etcd/raft/v3/quorum" - pb "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/raft/v3/tracker" + "go.etcd.io/raft/v3/quorum" + pb "go.etcd.io/raft/v3/raftpb" + "go.etcd.io/raft/v3/tracker" ) // Changer facilitates configuration changes. 
It exposes methods to handle @@ -49,7 +49,7 @@ type Changer struct { // // [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { - cfg, prs, err := c.checkAndCopy() + cfg, trk, err := c.checkAndCopy() if err != nil { return c.err(err) } @@ -70,11 +70,11 @@ func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker outgoing(cfg.Voters)[id] = struct{}{} } - if err := c.apply(&cfg, prs, ccs...); err != nil { + if err := c.apply(&cfg, trk, ccs...); err != nil { return c.err(err) } cfg.AutoLeave = autoLeave - return checkAndReturn(cfg, prs) + return checkAndReturn(cfg, trk) } // LeaveJoint transitions out of a joint configuration. It is an error to call @@ -92,7 +92,7 @@ func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker // // [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) { - cfg, prs, err := c.checkAndCopy() + cfg, trk, err := c.checkAndCopy() if err != nil { return c.err(err) } @@ -100,13 +100,9 @@ func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) { err := errors.New("can't leave a non-joint config") return c.err(err) } - if len(outgoing(cfg.Voters)) == 0 { - err := fmt.Errorf("configuration is not joint: %v", cfg) - return c.err(err) - } for id := range cfg.LearnersNext { nilAwareAdd(&cfg.Learners, id) - prs[id].IsLearner = true + trk[id].IsLearner = true } cfg.LearnersNext = nil @@ -115,13 +111,13 @@ func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) { _, isLearner := cfg.Learners[id] if !isVoter && !isLearner { - delete(prs, id) + delete(trk, id) } } *outgoingPtr(&cfg.Voters) = nil cfg.AutoLeave = false - return checkAndReturn(cfg, prs) + return checkAndReturn(cfg, trk) } // Simple carries out a series of configuration changes that (in aggregate) @@ -130,7 +126,7 @@ func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) { // zero, or if the configuration is in a joint state (i.e. if there is an // outgoing configuration). func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { - cfg, prs, err := c.checkAndCopy() + cfg, trk, err := c.checkAndCopy() if err != nil { return c.err(err) } @@ -138,20 +134,20 @@ func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.Pro err := errors.New("can't apply simple config change in joint config") return c.err(err) } - if err := c.apply(&cfg, prs, ccs...); err != nil { + if err := c.apply(&cfg, trk, ccs...); err != nil { return c.err(err) } if n := symdiff(incoming(c.Tracker.Voters), incoming(cfg.Voters)); n > 1 { return tracker.Config{}, nil, errors.New("more than one voter changed without entering joint config") } - return checkAndReturn(cfg, prs) + return checkAndReturn(cfg, trk) } // apply a change to the configuration. By convention, changes to voters are // always made to the incoming majority config Voters[0]. Voters[1] is either // empty or preserves the outgoing majority configuration while in a joint state. 
-func (c Changer) apply(cfg *tracker.Config, prs tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error { +func (c Changer) apply(cfg *tracker.Config, trk tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error { for _, cc := range ccs { if cc.NodeID == 0 { // etcd replaces the NodeID with zero if it decides (downstream of @@ -161,11 +157,11 @@ func (c Changer) apply(cfg *tracker.Config, prs tracker.ProgressMap, ccs ...pb.C } switch cc.Type { case pb.ConfChangeAddNode: - c.makeVoter(cfg, prs, cc.NodeID) + c.makeVoter(cfg, trk, cc.NodeID) case pb.ConfChangeAddLearnerNode: - c.makeLearner(cfg, prs, cc.NodeID) + c.makeLearner(cfg, trk, cc.NodeID) case pb.ConfChangeRemoveNode: - c.remove(cfg, prs, cc.NodeID) + c.remove(cfg, trk, cc.NodeID) case pb.ConfChangeUpdateNode: default: return fmt.Errorf("unexpected conf type %d", cc.Type) @@ -179,10 +175,10 @@ func (c Changer) apply(cfg *tracker.Config, prs tracker.ProgressMap, ccs ...pb.C // makeVoter adds or promotes the given ID to be a voter in the incoming // majority config. -func (c Changer) makeVoter(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { - pr := prs[id] +func (c Changer) makeVoter(cfg *tracker.Config, trk tracker.ProgressMap, id uint64) { + pr := trk[id] if pr == nil { - c.initProgress(cfg, prs, id, false /* isLearner */) + c.initProgress(cfg, trk, id, false /* isLearner */) return } @@ -205,19 +201,19 @@ func (c Changer) makeVoter(cfg *tracker.Config, prs tracker.ProgressMap, id uint // simultaneously. Instead, we add the learner to LearnersNext, so that it will // be added to Learners the moment the outgoing config is removed by // LeaveJoint(). -func (c Changer) makeLearner(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { - pr := prs[id] +func (c Changer) makeLearner(cfg *tracker.Config, trk tracker.ProgressMap, id uint64) { + pr := trk[id] if pr == nil { - c.initProgress(cfg, prs, id, true /* isLearner */) + c.initProgress(cfg, trk, id, true /* isLearner */) return } if pr.IsLearner { return } // Remove any existing voter in the incoming config... - c.remove(cfg, prs, id) + c.remove(cfg, trk, id) // ... but save the Progress. - prs[id] = pr + trk[id] = pr // Use LearnersNext if we can't add the learner to Learners directly, i.e. // if the peer is still tracked as a voter in the outgoing config. It will // be turned into a learner in LeaveJoint(). @@ -232,8 +228,8 @@ func (c Changer) makeLearner(cfg *tracker.Config, prs tracker.ProgressMap, id ui } // remove this peer as a voter or learner from the incoming config. -func (c Changer) remove(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { - if _, ok := prs[id]; !ok { +func (c Changer) remove(cfg *tracker.Config, trk tracker.ProgressMap, id uint64) { + if _, ok := trk[id]; !ok { return } @@ -243,18 +239,18 @@ func (c Changer) remove(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) // If the peer is still a voter in the outgoing config, keep the Progress. if _, onRight := outgoing(cfg.Voters)[id]; !onRight { - delete(prs, id) + delete(trk, id) } } // initProgress initializes a new progress for the given node or learner. 
-func (c Changer) initProgress(cfg *tracker.Config, prs tracker.ProgressMap, id uint64, isLearner bool) { +func (c Changer) initProgress(cfg *tracker.Config, trk tracker.ProgressMap, id uint64, isLearner bool) { if !isLearner { incoming(cfg.Voters)[id] = struct{}{} } else { nilAwareAdd(&cfg.Learners, id) } - prs[id] = &tracker.Progress{ + trk[id] = &tracker.Progress{ // Initializing the Progress with the last index means that the follower // can be probed (with the last index). // @@ -265,7 +261,7 @@ func (c Changer) initProgress(cfg *tracker.Config, prs tracker.ProgressMap, id u // making the first index the better choice). Next: c.LastIndex, Match: 0, - Inflights: tracker.NewInflights(c.Tracker.MaxInflight), + Inflights: tracker.NewInflights(c.Tracker.MaxInflight, c.Tracker.MaxInflightBytes), IsLearner: isLearner, // When a node is first added, we should mark it as recently active. // Otherwise, CheckQuorum may cause us to step down if it is invoked @@ -277,7 +273,7 @@ func (c Changer) initProgress(cfg *tracker.Config, prs tracker.ProgressMap, id u // checkInvariants makes sure that the config and progress are compatible with // each other. This is used to check both what the Changer is initialized with, // as well as what it returns. -func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error { +func checkInvariants(cfg tracker.Config, trk tracker.ProgressMap) error { // NB: intentionally allow the empty config. In production we'll never see a // non-empty config (we prevent it from being created) but we will need to // be able to *create* an initial config, for example during bootstrap (or @@ -290,7 +286,7 @@ func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error { cfg.LearnersNext, } { for id := range ids { - if _, ok := prs[id]; !ok { + if _, ok := trk[id]; !ok { return fmt.Errorf("no progress for %d", id) } } @@ -302,7 +298,7 @@ func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error { if _, ok := outgoing(cfg.Voters)[id]; !ok { return fmt.Errorf("%d is in LearnersNext, but not Voters[1]", id) } - if prs[id].IsLearner { + if trk[id].IsLearner { return fmt.Errorf("%d is in LearnersNext, but is already marked as learner", id) } } @@ -314,7 +310,7 @@ func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error { if _, ok := incoming(cfg.Voters)[id]; ok { return fmt.Errorf("%d is in Learners and Voters[0]", id) } - if !prs[id].IsLearner { + if !trk[id].IsLearner { return fmt.Errorf("%d is in Learners, but is not marked as learner", id) } } @@ -340,23 +336,23 @@ func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error { // if checkInvariants does. func (c Changer) checkAndCopy() (tracker.Config, tracker.ProgressMap, error) { cfg := c.Tracker.Config.Clone() - prs := tracker.ProgressMap{} + trk := tracker.ProgressMap{} for id, pr := range c.Tracker.Progress { // A shallow copy is enough because we only mutate the Learner field. ppr := *pr - prs[id] = &ppr + trk[id] = &ppr } - return checkAndReturn(cfg, prs) + return checkAndReturn(cfg, trk) } // checkAndReturn calls checkInvariants on the input and returns either the // resulting error or the input. 
-func checkAndReturn(cfg tracker.Config, prs tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) { - if err := checkInvariants(cfg, prs); err != nil { +func checkAndReturn(cfg tracker.Config, trk tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) { + if err := checkInvariants(cfg, trk); err != nil { return tracker.Config{}, tracker.ProgressMap{}, err } - return cfg, prs, nil + return cfg, trk, nil } // err returns zero values and an error. diff --git a/vendor/go.etcd.io/etcd/raft/v3/confchange/restore.go b/vendor/go.etcd.io/raft/v3/confchange/restore.go similarity index 97% rename from vendor/go.etcd.io/etcd/raft/v3/confchange/restore.go rename to vendor/go.etcd.io/raft/v3/confchange/restore.go index ea317fc289a80..68ef029a9c672 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/confchange/restore.go +++ b/vendor/go.etcd.io/raft/v3/confchange/restore.go @@ -15,8 +15,8 @@ package confchange import ( - pb "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/raft/v3/tracker" + pb "go.etcd.io/raft/v3/raftpb" + "go.etcd.io/raft/v3/tracker" ) // toConfChangeSingle translates a conf state into 1) a slice of operations creating @@ -98,12 +98,12 @@ func toConfChangeSingle(cs pb.ConfState) (out []pb.ConfChangeSingle, in []pb.Con func chain(chg Changer, ops ...func(Changer) (tracker.Config, tracker.ProgressMap, error)) (tracker.Config, tracker.ProgressMap, error) { for _, op := range ops { - cfg, prs, err := op(chg) + cfg, trk, err := op(chg) if err != nil { return tracker.Config{}, nil, err } chg.Tracker.Config = cfg - chg.Tracker.Progress = prs + chg.Tracker.Progress = trk } return chg.Tracker.Config, chg.Tracker.Progress, nil } diff --git a/vendor/go.etcd.io/etcd/raft/v3/design.md b/vendor/go.etcd.io/raft/v3/design.md similarity index 100% rename from vendor/go.etcd.io/etcd/raft/v3/design.md rename to vendor/go.etcd.io/raft/v3/design.md diff --git a/vendor/go.etcd.io/etcd/raft/v3/doc.go b/vendor/go.etcd.io/raft/v3/doc.go similarity index 79% rename from vendor/go.etcd.io/etcd/raft/v3/doc.go rename to vendor/go.etcd.io/raft/v3/doc.go index 4febfe6084b82..06253f4e12c70 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/doc.go +++ b/vendor/go.etcd.io/raft/v3/doc.go @@ -169,6 +169,94 @@ given ID MUST be used only once even if the old node has been removed. This means that for example IP addresses make poor node IDs since they may be reused. Node IDs must be non-zero. +# Usage with Asynchronous Storage Writes + +The library can be configured with an alternate interface for local storage +writes that can provide better performance in the presence of high proposal +concurrency by minimizing interference between proposals. This feature is called +AsynchronousStorageWrites, and can be enabled using the flag on the Config +struct with the same name. + +When Asynchronous Storage Writes is enabled, the responsibility of code using +the library is different from what was presented above. Users still read from +the Node.Ready() channel. However, they process the updates it contains in a +different manner. Users no longer consult the HardState, Entries, and Snapshot +fields (steps 1 and 3 above). They also no longer call Node.Advance() to +indicate that they have processed all entries in the Ready (step 4 above). +Instead, all local storage operations are also communicated through messages +present in the Ready.Message slice. + +The local storage messages come in two flavors. The first flavor is log append +messages, which target a LocalAppendThread and carry Entries, HardState, and a +Snapshot. 
The second flavor is entry application messages, which target a +LocalApplyThread and carry CommittedEntries. Messages to the same target must be +reliably processed in order. Messages to different targets can be processed in +any order. + +Each local storage message carries a slice of response messages that must +be delivered after the corresponding storage write has been completed. These +responses may target the same node or may target other nodes. + +With Asynchronous Storage Writes enabled, the total state machine handling loop +will look something like this: + + for { + select { + case <-s.Ticker: + n.Tick() + case rd := <-s.Node.Ready(): + for _, m := range rd.Messages { + switch m.To { + case raft.LocalAppendThread: + toAppend <- m + case raft.LocalApplyThread: + toApply <- m + default: + sendOverNetwork(m) + } + } + case <-s.done: + return + } + } + +Usage of Asynchronous Storage Writes will typically also contain a pair of +storage handler threads, one for log writes (append) and one for entry +application to the local state machine (apply). Those will look something like: + + // append thread + go func() { + for { + select { + case m := <-toAppend: + saveToStorage(m.State, m.Entries, m.Snapshot) + send(m.Responses) + case <-s.done: + return + } + } + }() + + // apply thread + go func() { + for { + select { + case m := <-toApply: + for _, entry := range m.CommittedEntries { + process(entry) + if entry.Type == raftpb.EntryConfChange { + var cc raftpb.ConfChange + cc.Unmarshal(entry.Data) + s.Node.ApplyConfChange(cc) + } + } + send(m.Responses) + case <-s.done: + return + } + } + }() + # Implementation notes This implementation is up to date with the final Raft thesis @@ -295,5 +383,17 @@ stale log entries: that the follower that sent this 'MsgUnreachable' is not reachable, often indicating 'MsgApp' is lost. When follower's progress state is replicate, the leader sets it back to probe. + + 'MsgStorageAppend' is a message from a node to its local append storage + thread to write entries, hard state, and/or a snapshot to stable storage. + The message will carry one or more responses, one of which will be a + 'MsgStorageAppendResp' back to itself. The responses can also contain + 'MsgAppResp', 'MsgVoteResp', and 'MsgPreVoteResp' messages. Used with + AsynchronousStorageWrites. + + 'MsgStorageApply' is a message from a node to its local apply storage + thread to apply committed entries. The message will carry one response, + which will be a 'MsgStorageApplyResp' back to itself. Used with + AsynchronousStorageWrites. */ package raft diff --git a/vendor/go.etcd.io/raft/v3/log.go b/vendor/go.etcd.io/raft/v3/log.go new file mode 100644 index 0000000000000..84826c3a0b5fc --- /dev/null +++ b/vendor/go.etcd.io/raft/v3/log.go @@ -0,0 +1,568 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package raft + +import ( + "fmt" + "log" + + pb "go.etcd.io/raft/v3/raftpb" +) + +type raftLog struct { + // storage contains all stable entries since the last snapshot. + storage Storage + + // unstable contains all unstable entries and snapshot. + // they will be saved into storage. + unstable unstable + + // committed is the highest log position that is known to be in + // stable storage on a quorum of nodes. + committed uint64 + // applying is the highest log position that the application has + // been instructed to apply to its state machine. Some of these + // entries may be in the process of applying and have not yet + // reached applied. + // Use: The field is incremented when accepting a Ready struct. + // Invariant: applied <= applying && applying <= committed + applying uint64 + // applied is the highest log position that the application has + // successfully applied to its state machine. + // Use: The field is incremented when advancing after the committed + // entries in a Ready struct have been applied (either synchronously + // or asynchronously). + // Invariant: applied <= committed + applied uint64 + + logger Logger + + // maxApplyingEntsSize limits the outstanding byte size of the messages + // returned from calls to nextCommittedEnts that have not been acknowledged + // by a call to appliedTo. + maxApplyingEntsSize entryEncodingSize + // applyingEntsSize is the current outstanding byte size of the messages + // returned from calls to nextCommittedEnts that have not been acknowledged + // by a call to appliedTo. + applyingEntsSize entryEncodingSize + // applyingEntsPaused is true when entry application has been paused until + // enough progress is acknowledged. + applyingEntsPaused bool +} + +// newLog returns a log using the given storage and default options. It +// recovers the log to the state where it has just committed and applied +// the latest snapshot. +func newLog(storage Storage, logger Logger) *raftLog { + return newLogWithSize(storage, logger, noLimit) +} + +// newLogWithSize returns a log using the given storage and max +// message size. +func newLogWithSize(storage Storage, logger Logger, maxApplyingEntsSize entryEncodingSize) *raftLog { + if storage == nil { + log.Panic("storage must not be nil") + } + log := &raftLog{ + storage: storage, + logger: logger, + maxApplyingEntsSize: maxApplyingEntsSize, + } + firstIndex, err := storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + lastIndex, err := storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + log.unstable.offset = lastIndex + 1 + log.unstable.offsetInProgress = lastIndex + 1 + log.unstable.logger = logger + // Initialize our committed and applied pointers to the time of the last compaction. + log.committed = firstIndex - 1 + log.applying = firstIndex - 1 + log.applied = firstIndex - 1 + + return log +} + +func (l *raftLog) String() string { + return fmt.Sprintf("committed=%d, applied=%d, applying=%d, unstable.offset=%d, unstable.offsetInProgress=%d, len(unstable.Entries)=%d", + l.committed, l.applied, l.applying, l.unstable.offset, l.unstable.offsetInProgress, len(l.unstable.entries)) +} + +// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise, +// it returns (last index of new entries, true).
+func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) { + if !l.matchTerm(index, logTerm) { + return 0, false + } + + lastnewi = index + uint64(len(ents)) + ci := l.findConflict(ents) + switch { + case ci == 0: + case ci <= l.committed: + l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed) + default: + offset := index + 1 + if ci-offset > uint64(len(ents)) { + l.logger.Panicf("index, %d, is out of range [%d]", ci-offset, len(ents)) + } + l.append(ents[ci-offset:]...) + } + l.commitTo(min(committed, lastnewi)) + return lastnewi, true +} + +func (l *raftLog) append(ents ...pb.Entry) uint64 { + if len(ents) == 0 { + return l.lastIndex() + } + if after := ents[0].Index - 1; after < l.committed { + l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed) + } + l.unstable.truncateAndAppend(ents) + return l.lastIndex() +} + +// findConflict finds the index of the conflict. +// It returns the index of the first conflicting entry between the existing +// entries and the given entries, if there is one. +// If there are no conflicting entries, and the existing entries contain +// all the given entries, zero will be returned. +// If there are no conflicting entries, but the given entries contain new +// entries, the index of the first new entry will be returned. +// An entry is considered to be conflicting if it has the same index but +// a different term. +// The indexes of the given entries MUST be continuously increasing. +func (l *raftLog) findConflict(ents []pb.Entry) uint64 { + for _, ne := range ents { + if !l.matchTerm(ne.Index, ne.Term) { + if ne.Index <= l.lastIndex() { + l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]", + ne.Index, l.zeroTermOnOutOfBounds(l.term(ne.Index)), ne.Term) + } + return ne.Index + } + } + return 0 +} + +// findConflictByTerm returns a best guess on where this log ends matching +// another log, given that the only information known about the other log is the +// (index, term) of its single entry. +// +// Specifically, the first returned value is the max guessIndex <= index, such +// that term(guessIndex) <= term or term(guessIndex) is not known (because this +// index is compacted or not yet stored). +// +// The second returned value is the term(guessIndex), or 0 if it is unknown. +// +// This function is used by a follower and leader to resolve log conflicts after +// an unsuccessful append to a follower, and ultimately restore the steady flow +// of appends. +func (l *raftLog) findConflictByTerm(index uint64, term uint64) (uint64, uint64) { + for ; index > 0; index-- { + // If there is an error (likely ErrCompacted or ErrUnavailable), we don't + // know whether it's a match or not, so assume a possible match and return + // the index, with 0 term indicating an unknown term. + if ourTerm, err := l.term(index); err != nil { + return index, 0 + } else if ourTerm <= term { + return index, ourTerm + } + } + return 0, 0 +} + +// nextUnstableEnts returns all entries that are available to be written to the +// local stable log and are not already in-progress. +func (l *raftLog) nextUnstableEnts() []pb.Entry { + return l.unstable.nextEntries() +} + +// hasNextUnstableEnts returns if there are any entries that are available to be +// written to the local stable log and are not already in-progress.
+func (l *raftLog) hasNextUnstableEnts() bool { + return len(l.nextUnstableEnts()) > 0 +} + +// hasNextOrInProgressUnstableEnts returns if there are any entries that are +// available to be written to the local stable log or in the process of being +// written to the local stable log. +func (l *raftLog) hasNextOrInProgressUnstableEnts() bool { + return len(l.unstable.entries) > 0 +} + +// nextCommittedEnts returns all the available entries for execution. +// Entries can be committed even when the local raft instance has not durably +// appended them to the local raft log yet. If allowUnstable is true, committed +// entries from the unstable log may be returned; otherwise, only entries known +// to reside locally on stable storage will be returned. +func (l *raftLog) nextCommittedEnts(allowUnstable bool) (ents []pb.Entry) { + if l.applyingEntsPaused { + // Entry application outstanding size limit reached. + return nil + } + if l.hasNextOrInProgressSnapshot() { + // See comment in hasNextCommittedEnts. + return nil + } + lo, hi := l.applying+1, l.maxAppliableIndex(allowUnstable)+1 // [lo, hi) + if lo >= hi { + // Nothing to apply. + return nil + } + maxSize := l.maxApplyingEntsSize - l.applyingEntsSize + if maxSize <= 0 { + l.logger.Panicf("applying entry size (%d-%d)=%d not positive", + l.maxApplyingEntsSize, l.applyingEntsSize, maxSize) + } + ents, err := l.slice(lo, hi, maxSize) + if err != nil { + l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err) + } + return ents +} + +// hasNextCommittedEnts returns if there are any entries available for execution. +// This is a fast check without heavy raftLog.slice() in nextCommittedEnts(). +func (l *raftLog) hasNextCommittedEnts(allowUnstable bool) bool { + if l.applyingEntsPaused { + // Entry application outstanding size limit reached. + return false + } + if l.hasNextOrInProgressSnapshot() { + // If we have a snapshot to apply, don't also return any committed + // entries. Doing so raises questions about what should be applied + // first. + return false + } + lo, hi := l.applying+1, l.maxAppliableIndex(allowUnstable)+1 // [lo, hi) + return lo < hi +} + +// maxAppliableIndex returns the maximum committed index that can be applied. +// If allowUnstable is true, committed entries from the unstable log can be +// applied; otherwise, only entries known to reside locally on stable storage +// can be applied. +func (l *raftLog) maxAppliableIndex(allowUnstable bool) uint64 { + hi := l.committed + if !allowUnstable { + hi = min(hi, l.unstable.offset-1) + } + return hi +} + +// nextUnstableSnapshot returns the snapshot, if present, that is available to +// be applied to the local storage and is not already in-progress. +func (l *raftLog) nextUnstableSnapshot() *pb.Snapshot { + return l.unstable.nextSnapshot() +} + +// hasNextUnstableSnapshot returns if there is a snapshot that is available to +// be applied to the local storage and is not already in-progress. +func (l *raftLog) hasNextUnstableSnapshot() bool { + return l.unstable.nextSnapshot() != nil +} + +// hasNextOrInProgressSnapshot returns if there is a pending snapshot waiting to +// be applied or in the process of being applied.
+func (l *raftLog) hasNextOrInProgressSnapshot() bool { + return l.unstable.snapshot != nil +} + +func (l *raftLog) snapshot() (pb.Snapshot, error) { + if l.unstable.snapshot != nil { + return *l.unstable.snapshot, nil + } + return l.storage.Snapshot() +} + +func (l *raftLog) firstIndex() uint64 { + if i, ok := l.unstable.maybeFirstIndex(); ok { + return i + } + index, err := l.storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + return index +} + +func (l *raftLog) lastIndex() uint64 { + if i, ok := l.unstable.maybeLastIndex(); ok { + return i + } + i, err := l.storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + return i +} + +func (l *raftLog) commitTo(tocommit uint64) { + // never decrease commit + if l.committed < tocommit { + if l.lastIndex() < tocommit { + l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex()) + } + l.committed = tocommit + } +} + +func (l *raftLog) appliedTo(i uint64, size entryEncodingSize) { + if l.committed < i || i < l.applied { + l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed) + } + l.applied = i + l.applying = max(l.applying, i) + if l.applyingEntsSize > size { + l.applyingEntsSize -= size + } else { + // Defense against underflow. + l.applyingEntsSize = 0 + } + l.applyingEntsPaused = l.applyingEntsSize >= l.maxApplyingEntsSize +} + +func (l *raftLog) acceptApplying(i uint64, size entryEncodingSize, allowUnstable bool) { + if l.committed < i { + l.logger.Panicf("applying(%d) is out of range [prevApplying(%d), committed(%d)]", i, l.applying, l.committed) + } + l.applying = i + l.applyingEntsSize += size + // Determine whether to pause entry application until some progress is + // acknowledged. We pause in two cases: + // 1. the outstanding entry size equals or exceeds the maximum size. + // 2. the outstanding entry size does not equal or exceed the maximum size, + // but we determine that the next entry in the log will push us over the + // limit. We determine this by comparing the last entry returned from + // raftLog.nextCommittedEnts to the maximum entry that the method was + // allowed to return had there been no size limit. If these indexes are + // not equal, then the returned entries slice must have been truncated to + // adhere to the memory limit. + l.applyingEntsPaused = l.applyingEntsSize >= l.maxApplyingEntsSize || + i < l.maxAppliableIndex(allowUnstable) +} + +func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) } + +func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) } + +// acceptUnstable indicates that the application has started persisting the +// unstable entries in storage, and that the current unstable entries are thus +// to be marked as being in-progress, to avoid returning them with future calls +// to Ready(). +func (l *raftLog) acceptUnstable() { l.unstable.acceptInProgress() } + +func (l *raftLog) lastTerm() uint64 { + t, err := l.term(l.lastIndex()) + if err != nil { + l.logger.Panicf("unexpected error when getting the last term (%v)", err) + } + return t +} + +func (l *raftLog) term(i uint64) (uint64, error) { + // Check the unstable log first, even before computing the valid term range, + // which may need to access stable Storage. If we find the entry's term in + // the unstable log, we know it was in the valid range. 
+ if t, ok := l.unstable.maybeTerm(i); ok { + return t, nil + } + + // The valid term range is [firstIndex-1, lastIndex]. Even though the entry at + // firstIndex-1 is compacted away, its term is available for matching purposes + // when doing log appends. + if i+1 < l.firstIndex() { + return 0, ErrCompacted + } + if i > l.lastIndex() { + return 0, ErrUnavailable + } + + t, err := l.storage.Term(i) + if err == nil { + return t, nil + } + if err == ErrCompacted || err == ErrUnavailable { + return 0, err + } + panic(err) // TODO(bdarnell) +} + +func (l *raftLog) entries(i uint64, maxSize entryEncodingSize) ([]pb.Entry, error) { + if i > l.lastIndex() { + return nil, nil + } + return l.slice(i, l.lastIndex()+1, maxSize) +} + +// allEntries returns all entries in the log. +func (l *raftLog) allEntries() []pb.Entry { + ents, err := l.entries(l.firstIndex(), noLimit) + if err == nil { + return ents + } + if err == ErrCompacted { // try again if there was a racing compaction + return l.allEntries() + } + // TODO (xiangli): handle error? + panic(err) +} + +// isUpToDate determines if the given (lastIndex,term) log is more up-to-date +// by comparing the index and term of the last entries in the existing logs. +// If the logs have last entries with different terms, then the log with the +// later term is more up-to-date. If the logs end with the same term, then +// whichever log has the larger lastIndex is more up-to-date. If the logs are +// the same, the given log is up-to-date. +func (l *raftLog) isUpToDate(lasti, term uint64) bool { + return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex()) +} + +func (l *raftLog) matchTerm(i, term uint64) bool { + t, err := l.term(i) + if err != nil { + return false + } + return t == term +} + +func (l *raftLog) maybeCommit(maxIndex, term uint64) bool { + // NB: term should never be 0 on a commit because the leader campaigns at + // least at term 1. But if it is 0 for some reason, we don't want to consider + // this a term match in case zeroTermOnOutOfBounds returns 0. + if maxIndex > l.committed && term != 0 && l.zeroTermOnOutOfBounds(l.term(maxIndex)) == term { + l.commitTo(maxIndex) + return true + } + return false +} + +func (l *raftLog) restore(s pb.Snapshot) { + l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term) + l.committed = s.Metadata.Index + l.unstable.restore(s) +} + +// scan visits all log entries in the [lo, hi) range, returning them via the +// given callback. The callback can be invoked multiple times, with consecutive +// sub-ranges of the requested range. Returns up to pageSize bytes worth of +// entries at a time. May return more if a single entry size exceeds the limit. +// +// The entries in [lo, hi) must exist, otherwise scan() eventually returns an +// error (possibly after passing some entries through the callback). +// +// If the callback returns an error, scan terminates and returns this error +// immediately. This can be used to stop the scan early ("break" the loop). +func (l *raftLog) scan(lo, hi uint64, pageSize entryEncodingSize, v func([]pb.Entry) error) error { + for lo < hi { + ents, err := l.slice(lo, hi, pageSize) + if err != nil { + return err + } else if len(ents) == 0 { + return fmt.Errorf("got 0 entries in [%d, %d)", lo, hi) + } + if err := v(ents); err != nil { + return err + } + lo += uint64(len(ents)) + } + return nil +} + +// slice returns a slice of log entries from lo through hi-1, inclusive. 
+func (l *raftLog) slice(lo, hi uint64, maxSize entryEncodingSize) ([]pb.Entry, error) { + if err := l.mustCheckOutOfBounds(lo, hi); err != nil { + return nil, err + } + if lo == hi { + return nil, nil + } + if lo >= l.unstable.offset { + ents := limitSize(l.unstable.slice(lo, hi), maxSize) + // NB: use the full slice expression to protect the unstable slice from + // appends to the returned ents slice. + return ents[:len(ents):len(ents)], nil + } + + cut := min(hi, l.unstable.offset) + ents, err := l.storage.Entries(lo, cut, uint64(maxSize)) + if err == ErrCompacted { + return nil, err + } else if err == ErrUnavailable { + l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, cut) + } else if err != nil { + panic(err) // TODO(pavelkalinnikov): handle errors uniformly + } + if hi <= l.unstable.offset { + return ents, nil + } + + // Fast path to check if ents has reached the size limitation. Either the + // returned slice is shorter than requested (which means the next entry would + // bring it over the limit), or a single entry reaches the limit. + if uint64(len(ents)) < cut-lo { + return ents, nil + } + // Slow path computes the actual total size, so that unstable entries are cut + // optimally before being copied to ents slice. + size := entsSize(ents) + if size >= maxSize { + return ents, nil + } + + unstable := limitSize(l.unstable.slice(l.unstable.offset, hi), maxSize-size) + // Total size of unstable may exceed maxSize-size only if len(unstable) == 1. + // If this happens, ignore this extra entry. + if len(unstable) == 1 && size+entsSize(unstable) > maxSize { + return ents, nil + } + // Otherwise, total size of unstable does not exceed maxSize-size, so total + // size of ents+unstable does not exceed maxSize. Simply concatenate them. + return extend(ents, unstable), nil +} + +// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries) +func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error { + if lo > hi { + l.logger.Panicf("invalid slice %d > %d", lo, hi) + } + fi := l.firstIndex() + if lo < fi { + return ErrCompacted + } + + length := l.lastIndex() + 1 - fi + if hi > fi+length { + l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex()) + } + return nil +} + +func (l *raftLog) zeroTermOnOutOfBounds(t uint64, err error) uint64 { + if err == nil { + return t + } + if err == ErrCompacted || err == ErrUnavailable { + return 0 + } + l.logger.Panicf("unexpected error (%v)", err) + return 0 +} diff --git a/vendor/go.etcd.io/raft/v3/log_unstable.go b/vendor/go.etcd.io/raft/v3/log_unstable.go new file mode 100644 index 0000000000000..16cbdeff18ba3 --- /dev/null +++ b/vendor/go.etcd.io/raft/v3/log_unstable.go @@ -0,0 +1,245 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "go.etcd.io/raft/v3/raftpb" + +// unstable contains "unstable" log entries and snapshot state that has +// not yet been written to Storage. The type serves two roles. 
First, it +// holds on to new log entries and an optional snapshot until they are +// handed to a Ready struct for persistence. Second, it continues to +// hold on to this state after it has been handed off to provide raftLog +// with a view of the in-progress log entries and snapshot until their +// writes have been stabilized and are guaranteed to be reflected in +// queries of Storage. After this point, the corresponding log entries +// and/or snapshot can be cleared from unstable. +// +// unstable.entries[i] has raft log position i+unstable.offset. +// Note that unstable.offset may be less than the highest log +// position in storage; this means that the next write to storage +// might need to truncate the log before persisting unstable.entries. +type unstable struct { + // the incoming unstable snapshot, if any. + snapshot *pb.Snapshot + // all entries that have not yet been written to storage. + entries []pb.Entry + // entries[i] has raft log position i+offset. + offset uint64 + + // if true, snapshot is being written to storage. + snapshotInProgress bool + // entries[:offsetInProgress-offset] are being written to storage. + // Like offset, offsetInProgress is exclusive, meaning that it + // contains the index following the largest in-progress entry. + // Invariant: offset <= offsetInProgress + offsetInProgress uint64 + + logger Logger +} + +// maybeFirstIndex returns the index of the first possible entry in entries +// if it has a snapshot. +func (u *unstable) maybeFirstIndex() (uint64, bool) { + if u.snapshot != nil { + return u.snapshot.Metadata.Index + 1, true + } + return 0, false +} + +// maybeLastIndex returns the last index if it has at least one +// unstable entry or snapshot. +func (u *unstable) maybeLastIndex() (uint64, bool) { + if l := len(u.entries); l != 0 { + return u.offset + uint64(l) - 1, true + } + if u.snapshot != nil { + return u.snapshot.Metadata.Index, true + } + return 0, false +} + +// maybeTerm returns the term of the entry at index i, if there +// is any. +func (u *unstable) maybeTerm(i uint64) (uint64, bool) { + if i < u.offset { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { + return u.snapshot.Metadata.Term, true + } + return 0, false + } + + last, ok := u.maybeLastIndex() + if !ok { + return 0, false + } + if i > last { + return 0, false + } + + return u.entries[i-u.offset].Term, true +} + +// nextEntries returns the unstable entries that are not already in the process +// of being written to storage. +func (u *unstable) nextEntries() []pb.Entry { + inProgress := int(u.offsetInProgress - u.offset) + if len(u.entries) == inProgress { + return nil + } + return u.entries[inProgress:] +} + +// nextSnapshot returns the unstable snapshot, if one exists that is not already +// in the process of being written to storage. +func (u *unstable) nextSnapshot() *pb.Snapshot { + if u.snapshot == nil || u.snapshotInProgress { + return nil + } + return u.snapshot +} + +// acceptInProgress marks all entries and the snapshot, if any, in the unstable +// as having begun the process of being written to storage. The entries/snapshot +// will no longer be returned from nextEntries/nextSnapshot. However, new +// entries/snapshots added after a call to acceptInProgress will be returned +// from those methods, until the next call to acceptInProgress. +func (u *unstable) acceptInProgress() { + if len(u.entries) > 0 { + // NOTE: +1 because offsetInProgress is exclusive, like offset. 
+ u.offsetInProgress = u.entries[len(u.entries)-1].Index + 1 + } + if u.snapshot != nil { + u.snapshotInProgress = true + } +} + +// stableTo marks entries up to the entry with the specified (index, term) as +// being successfully written to stable storage. +// +// The method should only be called when the caller can attest that the entries +// cannot be overwritten by an in-progress log append. See the related comment +// in newStorageAppendRespMsg. +func (u *unstable) stableTo(i, t uint64) { + gt, ok := u.maybeTerm(i) + if !ok { + // Unstable entry missing. Ignore. + u.logger.Infof("entry at index %d missing from unstable log; ignoring", i) + return + } + if i < u.offset { + // Index matched unstable snapshot, not unstable entry. Ignore. + u.logger.Infof("entry at index %d matched unstable snapshot; ignoring", i) + return + } + if gt != t { + // Term mismatch between unstable entry and specified entry. Ignore. + // This is possible if part or all of the unstable log was replaced + // between the time that a set of entries started to be written to + // stable storage and the time that they finished. + u.logger.Infof("entry at (index,term)=(%d,%d) mismatched with "+ + "entry at (%d,%d) in unstable log; ignoring", i, t, i, gt) + return + } + num := int(i + 1 - u.offset) + u.entries = u.entries[num:] + u.offset = i + 1 + u.offsetInProgress = max(u.offsetInProgress, u.offset) + u.shrinkEntriesArray() +} + +// shrinkEntriesArray discards the underlying array used by the entries slice +// if most of it isn't being used. This avoids holding references to a bunch of +// potentially large entries that aren't needed anymore. Simply clearing the +// entries wouldn't be safe because clients might still be using them. +func (u *unstable) shrinkEntriesArray() { + // We replace the array if we're using less than half of the space in + // it. This number is fairly arbitrary, chosen as an attempt to balance + // memory usage vs number of allocations. It could probably be improved + // with some focused tuning. + const lenMultiple = 2 + if len(u.entries) == 0 { + u.entries = nil + } else if len(u.entries)*lenMultiple < cap(u.entries) { + newEntries := make([]pb.Entry, len(u.entries)) + copy(newEntries, u.entries) + u.entries = newEntries + } +} + +func (u *unstable) stableSnapTo(i uint64) { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { + u.snapshot = nil + u.snapshotInProgress = false + } +} + +func (u *unstable) restore(s pb.Snapshot) { + u.offset = s.Metadata.Index + 1 + u.offsetInProgress = u.offset + u.entries = nil + u.snapshot = &s + u.snapshotInProgress = false +} + +func (u *unstable) truncateAndAppend(ents []pb.Entry) { + fromIndex := ents[0].Index + switch { + case fromIndex == u.offset+uint64(len(u.entries)): + // fromIndex is the next index in the u.entries, so append directly. + u.entries = append(u.entries, ents...) + case fromIndex <= u.offset: + u.logger.Infof("replace the unstable entries from index %d", fromIndex) + // The log is being truncated to before our current offset + // portion, so set the offset and replace the entries. + u.entries = ents + u.offset = fromIndex + u.offsetInProgress = u.offset + default: + // Truncate to fromIndex (exclusive), and append the new entries. + u.logger.Infof("truncate the unstable entries before index %d", fromIndex) + keep := u.slice(u.offset, fromIndex) // NB: appending to this slice is safe,
// and will reallocate/copy it + // Only in-progress entries before fromIndex are still considered to be + // in-progress. + u.offsetInProgress = min(u.offsetInProgress, fromIndex) + } +} + +// slice returns the entries from the unstable log with indexes in the range +// [lo, hi). The entire range must be stored in the unstable log or the method +// will panic. The returned slice can be appended to, but the entries in it must +// not be changed because they are still shared with unstable. +// +// TODO(pavelkalinnikov): this, and similar []pb.Entry slices, may bubble up all +// the way to the application code through Ready struct. Protect other slices +// similarly, and document how the client can use them. +func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry { + u.mustCheckOutOfBounds(lo, hi) + // NB: use the full slice expression to limit what the caller can do with the + // returned slice. For example, an append will reallocate and copy this slice + // instead of corrupting the neighbouring u.entries. + return u.entries[lo-u.offset : hi-u.offset : hi-u.offset] +} + +// u.offset <= lo <= hi <= u.offset+len(u.entries) +func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) { + if lo > hi { + u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi) + } + upper := u.offset + uint64(len(u.entries)) + if lo < u.offset || hi > upper { + u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper) + } +} diff --git a/vendor/go.etcd.io/etcd/raft/v3/logger.go b/vendor/go.etcd.io/raft/v3/logger.go similarity index 97% rename from vendor/go.etcd.io/etcd/raft/v3/logger.go rename to vendor/go.etcd.io/raft/v3/logger.go index dc73b1f21042e..e3cb00cc9d13b 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/logger.go +++ b/vendor/go.etcd.io/raft/v3/logger.go @@ -16,7 +16,7 @@ package raft import ( "fmt" - "io/ioutil" + "io" "log" "os" "sync" @@ -60,7 +60,7 @@ func getLogger() Logger { var ( defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)} - discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)} + discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)} raftLoggerMu sync.Mutex raftLogger = Logger(defaultLogger) ) diff --git a/vendor/go.etcd.io/etcd/raft/v3/node.go b/vendor/go.etcd.io/raft/v3/node.go similarity index 79% rename from vendor/go.etcd.io/etcd/raft/v3/node.go rename to vendor/go.etcd.io/raft/v3/node.go index dca5954f7aeb9..e2a261cd7b80a 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/node.go +++ b/vendor/go.etcd.io/raft/v3/node.go @@ -18,7 +18,7 @@ import ( "context" "errors" - pb "go.etcd.io/etcd/raft/v3/raftpb" + pb "go.etcd.io/raft/v3/raftpb" ) type SnapshotStatus int @@ -57,7 +57,12 @@ type Ready struct { // The current state of a Node to be saved to stable storage BEFORE // Messages are sent. + // // HardState will be equal to empty state if there is no update. + // + // If async storage writes are enabled, this field does not need to be acted + // on immediately. It will be reflected in a MsgStorageAppend message in the + // Messages slice. pb.HardState // ReadStates can be used for node to serve linearizable read requests locally @@ -68,24 +73,44 @@ type Ready struct { // Entries specifies entries to be saved to stable storage BEFORE // Messages are sent. + // + // If async storage writes are enabled, this field does not need to be acted + // on immediately. It will be reflected in a MsgStorageAppend message in the + // Messages slice. 
Entries []pb.Entry // Snapshot specifies the snapshot to be saved to stable storage. + // + // If async storage writes are enabled, this field does not need to be acted + // on immediately. It will be reflected in a MsgStorageAppend message in the + // Messages slice. Snapshot pb.Snapshot // CommittedEntries specifies entries to be committed to a - // store/state-machine. These have previously been committed to stable - // store. + // store/state-machine. These have previously been appended to stable + // storage. + // + // If async storage writes are enabled, this field does not need to be acted + // on immediately. It will be reflected in a MsgStorageApply message in the + // Messages slice. CommittedEntries []pb.Entry - // Messages specifies outbound messages to be sent AFTER Entries are - // committed to stable storage. + // Messages specifies outbound messages. + // + // If async storage writes are not enabled, these messages must be sent + // AFTER Entries are appended to stable storage. + // + // If async storage writes are enabled, these messages can be sent + // immediately as the messages that have the completion of the async writes + // as a precondition are attached to the individual MsgStorage{Append,Apply} + // messages instead. + // // If it contains a MsgSnap message, the application MUST report back to raft // when the snapshot has been received or has failed by calling ReportSnapshot. Messages []pb.Message - // MustSync indicates whether the HardState and Entries must be synchronously - // written to disk or if an asynchronous write is permissible. + // MustSync indicates whether the HardState and Entries must be durably + // written to disk or if a non-durable write is permissible. MustSync bool } @@ -103,25 +128,6 @@ func IsEmptySnap(sp pb.Snapshot) bool { return sp.Metadata.Index == 0 } -func (rd Ready) containsUpdates() bool { - return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) || - !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 || - len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0 -} - -// appliedCursor extracts from the Ready the highest index the client has -// applied (once the Ready is confirmed via Advance). If no information is -// contained in the Ready, returns zero. -func (rd Ready) appliedCursor() uint64 { - if n := len(rd.CommittedEntries); n > 0 { - return rd.CommittedEntries[n-1].Index - } - if index := rd.Snapshot.Metadata.Index; index > 0 { - return index - } - return 0 -} - // Node represents a node in a raft cluster. type Node interface { // Tick increments the internal logical clock for the Node by a single tick. Election @@ -150,7 +156,8 @@ type Node interface { Step(ctx context.Context, msg pb.Message) error // Ready returns a channel that returns the current point-in-time state. - // Users of the Node must call Advance after retrieving the state returned by Ready. + // Users of the Node must call Advance after retrieving the state returned by Ready (unless + // async storage writes is enabled, in which case it should never be called). // // NOTE: No committed entries from the next Ready may be applied until all committed entries // and snapshots from the previous one have finished. @@ -165,6 +172,9 @@ type Node interface { // commands. For example. when the last Ready contains a snapshot, the application might take // a long time to apply the snapshot data. To continue receiving Ready without blocking raft // progress, it can call Advance before finishing applying the last ready. 
+	//
+	// NOTE: Advance must not be called when using AsyncStorageWrites. Response messages from the
+	// local append and apply threads take its place.
 	Advance()
 	// ApplyConfChange applies a config change (previously passed to
 	// ProposeConfChange) to the node. This must be called whenever a config
@@ -179,6 +189,32 @@ type Node interface {
 	// TransferLeadership attempts to transfer leadership to the given transferee.
 	TransferLeadership(ctx context.Context, lead, transferee uint64)
 
+	// ForgetLeader forgets a follower's current leader, changing it to None. It
+	// remains a leaderless follower in the current term, without campaigning.
+	//
+	// This is useful with PreVote+CheckQuorum, where followers will normally not
+	// grant pre-votes if they've heard from the leader in the past election
+	// timeout interval. Leaderless followers can grant pre-votes immediately, so
+	// if a quorum of followers have strong reason to believe the leader is dead
+	// (for example via a side-channel or external failure detector) and forget it
+	// then they can elect a new leader immediately, without waiting out the
+	// election timeout. They will also revert to normal followers if they hear
+	// from the leader again, or transition to candidates on an election timeout.
+	//
+	// For example, consider a three-node cluster where 1 is the leader and 2+3
+	// have just received a heartbeat from it. If 2 and 3 believe the leader has
+	// now died (maybe they know that an orchestration system shut down 1's VM),
+	// we can instruct 2 to forget the leader and 3 to campaign. 2 will then be
+	// able to grant 3's pre-vote and elect 3 as leader immediately (normally 2
+	// would reject the vote until an election timeout passes because it has heard
+	// from the leader recently). However, 3 cannot campaign unilaterally; a
+	// quorum has to agree that the leader is dead, which avoids disrupting the
+	// leader if individual nodes are wrong about it being dead.
+	//
+	// This does nothing with ReadOnlyLeaseBased, since it would allow a new
+	// leader to be elected without the old leader knowing.
+	ForgetLeader(ctx context.Context) error
+
 	// ReadIndex request a read state. The read state will be set in the ready.
 	// Read state has a read index. Once the application advances further than the read
 	// index, any linearizable read requests issued before the read request can be
@@ -211,11 +247,7 @@ type Peer struct {
 	Context []byte
 }
 
-// StartNode returns a new Node given configuration and a list of raft peers.
-// It appends a ConfChangeAddNode entry for each given peer to the initial log.
-//
-// Peers must not be zero length; call RestartNode in that case.
-func StartNode(c *Config, peers []Peer) Node {
+func setupNode(c *Config, peers []Peer) *node {
 	if len(peers) == 0 {
 		panic("no peers given; use RestartNode instead")
 	}
@@ -223,12 +255,23 @@ func StartNode(c *Config, peers []Peer) Node {
 	if err != nil {
 		panic(err)
 	}
-	rn.Bootstrap(peers)
+	err = rn.Bootstrap(peers)
+	if err != nil {
+		c.Logger.Warningf("error occurred while starting a new node: %v", err)
+	}
 
 	n := newNode(rn)
+	return &n
+}
 
+// StartNode returns a new Node given configuration and a list of raft peers.
+// It appends a ConfChangeAddNode entry for each given peer to the initial log.
+//
+// Peers must not be zero length; call RestartNode in that case.
+func StartNode(c *Config, peers []Peer) Node {
+	n := setupNode(c, peers)
 	go n.run()
-	return &n
+	return n
 }
 
 // RestartNode is similar to StartNode but does not take a list of peers.
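
The three-node scenario walked through in the ForgetLeader comment above can be sketched as a thin wrapper; the failure-detector trigger is hypothetical, while ForgetLeader and Campaign are real methods on the Node interface:

import (
	"context"

	"go.etcd.io/raft/v3"
)

// onLeaderPresumedDead reacts to an external signal that the leader is dead
// (say, an orchestration system reporting its VM as shut down): node 2
// forgets the leader so it can grant a pre-vote, and node 3 campaigns.
func onLeaderPresumedDead(ctx context.Context, n2, n3 raft.Node) error {
	if err := n2.ForgetLeader(ctx); err != nil {
		return err
	}
	return n3.Campaign(ctx)
}
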
@@ -308,9 +351,7 @@ func (n *node) run() {
 	lead := None
 
 	for {
-		if advancec != nil {
-			readyc = nil
-		} else if n.rn.HasReady() {
+		if advancec == nil && n.rn.HasReady() {
 			// Populate a Ready. Note that this Ready is not guaranteed to
 			// actually be handled. We will arm readyc, but there's no guarantee
 			// that we will actually send on it. It's possible that we will
@@ -351,12 +392,13 @@ func (n *node) run() {
 				close(pm.result)
 			}
 		case m := <-n.recvc:
-			// filter out response message from unknown From.
-			if pr := r.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) {
-				r.Step(m)
+			if IsResponseMsg(m.Type) && !IsLocalMsgTarget(m.From) && r.trk.Progress[m.From] == nil {
+				// Filter out response message from unknown From.
+				break
 			}
+			r.Step(m)
 		case cc := <-n.confc:
-			_, okBefore := r.prs.Progress[r.id]
+			_, okBefore := r.trk.Progress[r.id]
 			cs := r.applyConfChange(cc)
 			// If the node was removed, block incoming proposals. Note that we
 			// only do this if the node was in the config before. Nodes may be
@@ -367,16 +409,18 @@ func (n *node) run() {
 			// NB: propc is reset when the leader changes, which, if we learn
 			// about it, sort of implies that we got readded, maybe? This isn't
 			// very sound and likely has bugs.
-			if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter {
+			if _, okAfter := r.trk.Progress[r.id]; okBefore && !okAfter {
 				var found bool
-			outer:
 				for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} {
 					for _, id := range sl {
 						if id == r.id {
 							found = true
-							break outer
+							break
 						}
 					}
+					if found {
+						break
+					}
 				}
 				if !found {
 					propc = nil
@@ -390,7 +434,12 @@ func (n *node) run() {
 			n.rn.Tick()
 		case readyc <- rd:
 			n.rn.acceptReady(rd)
-			advancec = n.advancec
+			if !n.rn.asyncStorageWrites {
+				advancec = n.advancec
+			} else {
+				rd = Ready{}
+			}
+			readyc = nil
 		case <-advancec:
 			n.rn.Advance(rd)
 			rd = Ready{}
@@ -422,8 +471,8 @@ func (n *node) Propose(ctx context.Context, data []byte) error {
 }
 
 func (n *node) Step(ctx context.Context, m pb.Message) error {
-	// ignore unexpected local messages receiving over network
-	if IsLocalMsg(m.Type) {
+	// Ignore unexpected local messages received over the network.
+	if IsLocalMsg(m.Type) && !IsLocalMsgTarget(m.From) {
 		// TODO: return an error?
 		return nil
 	}
@@ -552,39 +601,10 @@ func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64)
 	}
 }
 
-func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
-	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+func (n *node) ForgetLeader(ctx context.Context) error {
+	return n.step(ctx, pb.Message{Type: pb.MsgForgetLeader})
 }
 
-func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
-	rd := Ready{
-		Entries:          r.raftLog.unstableEntries(),
-		CommittedEntries: r.raftLog.nextEnts(),
-		Messages:         r.msgs,
-	}
-	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
-		rd.SoftState = softSt
-	}
-	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
-		rd.HardState = hardSt
-	}
-	if r.raftLog.unstable.snapshot != nil {
-		rd.Snapshot = *r.raftLog.unstable.snapshot
-	}
-	if len(r.readStates) != 0 {
-		rd.ReadStates = r.readStates
-	}
-	rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries))
-	return rd
-}
-
-// MustSync returns true if the hard state and count of Raft entries indicate
-// that a synchronous write to persistent storage is required.
-func MustSync(st, prevst pb.HardState, entsnum int) bool {
-	// Persistent state on all servers:
-	// (Updated on stable storage before responding to RPCs)
-	// currentTerm
-	// votedFor
-	// log entries[]
-	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
+func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
+	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
 }
diff --git a/vendor/go.etcd.io/etcd/raft/v3/quorum/joint.go b/vendor/go.etcd.io/raft/v3/quorum/joint.go
similarity index 100%
rename from vendor/go.etcd.io/etcd/raft/v3/quorum/joint.go
rename to vendor/go.etcd.io/raft/v3/quorum/joint.go
diff --git a/vendor/go.etcd.io/etcd/raft/v3/quorum/majority.go b/vendor/go.etcd.io/raft/v3/quorum/majority.go
similarity index 97%
rename from vendor/go.etcd.io/etcd/raft/v3/quorum/majority.go
rename to vendor/go.etcd.io/raft/v3/quorum/majority.go
index 8858a36b63413..12766137a9c1b 100644
--- a/vendor/go.etcd.io/etcd/raft/v3/quorum/majority.go
+++ b/vendor/go.etcd.io/raft/v3/quorum/majority.go
@@ -183,8 +183,7 @@ func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult {
 		return VoteWon
 	}
 
-	ny := [2]int{} // vote counts for no and yes, respectively
-
+	var votedCnt int // count of yes votes
 	var missing int
 	for id := range c {
 		v, ok := votes[id]
@@ -193,17 +192,15 @@ func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult {
 			continue
 		}
 		if v {
-			ny[1]++
-		} else {
-			ny[0]++
+			votedCnt++
 		}
 	}
 
 	q := len(c)/2 + 1
-	if ny[1] >= q {
+	if votedCnt >= q {
 		return VoteWon
 	}
-	if ny[1]+missing >= q {
+	if votedCnt+missing >= q {
 		return VotePending
 	}
 	return VoteLost
diff --git a/vendor/go.etcd.io/etcd/raft/v3/quorum/quorum.go b/vendor/go.etcd.io/raft/v3/quorum/quorum.go
similarity index 100%
rename from vendor/go.etcd.io/etcd/raft/v3/quorum/quorum.go
rename to vendor/go.etcd.io/raft/v3/quorum/quorum.go
diff --git a/vendor/go.etcd.io/etcd/raft/v3/quorum/voteresult_string.go b/vendor/go.etcd.io/raft/v3/quorum/voteresult_string.go
similarity index 100%
rename from vendor/go.etcd.io/etcd/raft/v3/quorum/voteresult_string.go
rename to vendor/go.etcd.io/raft/v3/quorum/voteresult_string.go
diff --git a/vendor/go.etcd.io/etcd/raft/v3/raft.go b/vendor/go.etcd.io/raft/v3/raft.go
similarity index 68%
rename from vendor/go.etcd.io/etcd/raft/v3/raft.go
rename to vendor/go.etcd.io/raft/v3/raft.go
index c80262ebaf019..5a1505628a4be 100644
--- a/vendor/go.etcd.io/etcd/raft/v3/raft.go
+++ b/vendor/go.etcd.io/raft/v3/raft.go
@@ -16,24 +16,33 @@ package raft
 
 import (
 	"bytes"
+	"crypto/rand"
 	"errors"
 	"fmt"
 	"math"
-	"math/rand"
+	"math/big"
 	"sort"
 	"strings"
 	"sync"
-	"time"
 
-	"go.etcd.io/etcd/raft/v3/confchange"
-	"go.etcd.io/etcd/raft/v3/quorum"
-	pb "go.etcd.io/etcd/raft/v3/raftpb"
-	"go.etcd.io/etcd/raft/v3/tracker"
+	"go.etcd.io/raft/v3/confchange"
+	"go.etcd.io/raft/v3/quorum"
+	pb "go.etcd.io/raft/v3/raftpb"
+	"go.etcd.io/raft/v3/tracker"
 )
 
-// None is a placeholder node ID used when there is no leader.
-const None uint64 = 0
-const noLimit = math.MaxUint64
+const (
+	// None is a placeholder node ID used when there is no leader.
+	None uint64 = 0
+	// LocalAppendThread is a reference to a local thread that saves unstable
+	// log entries and snapshots to stable storage. The identifier is used as a
+	// target for MsgStorageAppend messages when AsyncStorageWrites is enabled.
+ LocalAppendThread uint64 = math.MaxUint64 + // LocalApplyThread is a reference to a local thread that applies committed + // log entries to the local state machine. The identifier is used as a + // target for MsgStorageApply messages when AsyncStorageWrites is enabled. + LocalApplyThread uint64 = math.MaxUint64 - 1 +) // Possible values for StateType. const ( @@ -70,6 +79,8 @@ const ( campaignTransfer CampaignType = "CampaignTransfer" ) +const noLimit = math.MaxUint64 + // ErrProposalDropped is returned when the proposal is ignored by some cases, // so that the proposer can be notified and fail fast. var ErrProposalDropped = errors.New("raft proposal dropped") @@ -78,20 +89,17 @@ var ErrProposalDropped = errors.New("raft proposal dropped") // synchronization among multiple raft groups. Only the methods needed // by the code are exposed (e.g. Intn). type lockedRand struct { - mu sync.Mutex - rand *rand.Rand + mu sync.Mutex } func (r *lockedRand) Intn(n int) int { r.mu.Lock() - v := r.rand.Intn(n) + v, _ := rand.Int(rand.Reader, big.NewInt(int64(n))) r.mu.Unlock() - return v + return int(v.Int64()) } -var globalRand = &lockedRand{ - rand: rand.New(rand.NewSource(time.Now().UnixNano())), -} +var globalRand = &lockedRand{} // CampaignType represents the type of campaigning // the reason we use the type of string instead of uint64 @@ -109,7 +117,7 @@ var stmap = [...]string{ } func (st StateType) String() string { - return stmap[uint64(st)] + return stmap[st] } // Config contains the parameters to start a raft. @@ -140,6 +148,42 @@ type Config struct { // applied entries. This is a very application dependent configuration. Applied uint64 + // AsyncStorageWrites configures the raft node to write to its local storage + // (raft log and state machine) using a request/response message passing + // interface instead of the default Ready/Advance function call interface. + // Local storage messages can be pipelined and processed asynchronously + // (with respect to Ready iteration), facilitating reduced interference + // between Raft proposals and increased batching of log appends and state + // machine application. As a result, use of asynchronous storage writes can + // reduce end-to-end commit latency and increase maximum throughput. + // + // When true, the Ready.Message slice will include MsgStorageAppend and + // MsgStorageApply messages. The messages will target a LocalAppendThread + // and a LocalApplyThread, respectively. Messages to the same target must be + // reliably processed in order. In other words, they can't be dropped (like + // messages over the network) and those targeted at the same thread can't be + // reordered. Messages to different targets can be processed in any order. + // + // MsgStorageAppend carries Raft log entries to append, election votes / + // term changes / updated commit indexes to persist, and snapshots to apply. + // All writes performed in service of a MsgStorageAppend must be durable + // before response messages are delivered. However, if the MsgStorageAppend + // carries no response messages, durability is not required. The message + // assumes the role of the Entries, HardState, and Snapshot fields in Ready. + // + // MsgStorageApply carries committed entries to apply. Writes performed in + // service of a MsgStorageApply need not be durable before response messages + // are delivered. The message assumes the role of the CommittedEntries field + // in Ready. 
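
A hedged sketch of an append thread honoring the MsgStorageAppend contract just described; the durableLog interface is a stand-in for whatever stable storage the application uses, and the Term/Vote/Commit/Responses fields on Message are read the way the surrounding comments describe them:

import "go.etcd.io/raft/v3/raftpb"

// durableLog is a placeholder for the application's stable storage.
type durableLog interface {
	SaveEntries([]raftpb.Entry)
	SaveState(term, vote, commit uint64)
	Sync()
}

// runAppendThread processes MsgStorageAppend messages strictly in order and
// only syncs when response messages are attached, since durability is not
// required for a write that carries no responses.
func runAppendThread(msgs <-chan raftpb.Message, wal durableLog, deliver func(raftpb.Message)) {
	for m := range msgs {
		wal.SaveEntries(m.Entries)
		wal.SaveState(m.Term, m.Vote, m.Commit)
		if len(m.Responses) > 0 {
			wal.Sync() // the attached responses are predicated on durability
		}
		for _, resp := range m.Responses {
			deliver(resp)
		}
	}
}
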
+	//
+	// Local messages each carry one or more response messages which should be
+	// delivered after the corresponding storage write has been completed. These
+	// responses may target the same node or may target other nodes. The storage
+	// threads are not responsible for understanding the response messages, only
+	// for delivering them to the correct target after performing the storage
+	// write.
+	AsyncStorageWrites bool
+
 	// MaxSizePerMsg limits the max byte size of each append message. Smaller
 	// value lowers the raft recovery cost(initial probing and message lost
 	// during normal operation). On the other side, it might affect the
@@ -147,7 +191,11 @@ type Config struct {
 	// 0 for at most one entry per message.
 	MaxSizePerMsg uint64
 	// MaxCommittedSizePerReady limits the size of the committed entries which
-	// can be applied.
+	// can be in the process of being applied at the same time.
+	//
+	// Despite its name (preserved for compatibility), this quota applies across
+	// Ready structs to encompass all outstanding entries in unacknowledged
+	// MsgStorageApply messages when AsyncStorageWrites is enabled.
 	MaxCommittedSizePerReady uint64
 	// MaxUncommittedEntriesSize limits the aggregate byte size of the
 	// uncommitted entries that may be appended to a leader's log. Once this
@@ -160,6 +208,16 @@ type Config struct {
 	// overflowing that sending buffer. TODO (xiangli): feedback to application to
 	// limit the proposal rate?
 	MaxInflightMsgs int
+	// MaxInflightBytes limits the number of in-flight bytes in append messages.
+	// Complements MaxInflightMsgs. Ignored if zero.
+	//
+	// This effectively bounds the bandwidth-delay product. Note that especially
+	// in high-latency deployments setting this too low can lead to a dramatic
+	// reduction in throughput. For example, with a peer that has a round-trip
+	// latency of 100ms to the leader and this setting is set to 1 MB, there is a
+	// throughput limit of 10 MB/s for this group. With RTT of 400ms, this drops
+	// to 2.5 MB/s. See Little's law to understand the math behind this.
+	MaxInflightBytes uint64
 
 	// CheckQuorum specifies if the leader should check quorum activity. Leader
 	// steps down when quorum is not active for an electionTimeout.
@@ -196,12 +254,44 @@ type Config struct {
 	// logical clock from assigning the timestamp and then forwarding the data
 	// to the leader.
 	DisableProposalForwarding bool
+
+	// DisableConfChangeValidation turns off propose-time verification of
+	// configuration changes against the currently active configuration of the
+	// raft instance. These checks are generally sensible (cannot leave a joint
+	// config unless in a joint config, et cetera) but they have false positives
+	// because the active configuration may not be the most recent
+	// configuration. This is because configurations are activated during log
+	// application, and even the leader can trail log application by an
+	// unbounded number of entries.
+	// Symmetrically, the mechanism has false negatives - because the check may
+	// not run against the "actual" config that will be the predecessor of the
+	// newly proposed one, the check may pass but the new config may be invalid
+	// when it is being applied. In other words, the checks are best-effort.
+	//
+	// Users should *not* use this option unless they have a reliable mechanism
+	// (above raft) that serializes and verifies configuration changes. If an
+	// invalid configuration change enters the log and gets applied, a panic
+	// will result.
+ // + // This option may be removed once false positives are no longer possible. + // See: https://github.com/etcd-io/raft/issues/80 + DisableConfChangeValidation bool + + // StepDownOnRemoval makes the leader step down when it is removed from the + // group or demoted to a learner. + // + // This behavior will become unconditional in the future. See: + // https://github.com/etcd-io/raft/issues/83 + StepDownOnRemoval bool } func (c *Config) validate() error { if c.ID == None { return errors.New("cannot use none as id") } + if IsLocalMsgTarget(c.ID) { + return errors.New("cannot use local target as id") + } if c.HeartbeatTick <= 0 { return errors.New("heartbeat tick must be greater than 0") @@ -228,6 +318,11 @@ func (c *Config) validate() error { if c.MaxInflightMsgs <= 0 { return errors.New("max inflight messages must be greater than 0") } + if c.MaxInflightBytes == 0 { + c.MaxInflightBytes = noLimit + } else if c.MaxInflightBytes < c.MaxSizePerMsg { + return errors.New("max inflight bytes must be >= max message size") + } if c.Logger == nil { c.Logger = getLogger() @@ -251,17 +346,32 @@ type raft struct { // the log raftLog *raftLog - maxMsgSize uint64 - maxUncommittedSize uint64 - // TODO(tbg): rename to trk. - prs tracker.ProgressTracker + maxMsgSize entryEncodingSize + maxUncommittedSize entryPayloadSize + + trk tracker.ProgressTracker state StateType // isLearner is true if the local raft node is a learner. isLearner bool + // msgs contains the list of messages that should be sent out immediately to + // other nodes. + // + // Messages in this list must target other nodes. msgs []pb.Message + // msgsAfterAppend contains the list of messages that should be sent after + // the accumulated unstable state (e.g. term, vote, []entry, and snapshot) + // has been persisted to durable storage. This includes waiting for any + // unstable state that is already in the process of being persisted (i.e. + // has already been handed out in a prior Ready struct) to complete. + // + // Messages in this list may target other nodes or may target this node. + // + // Messages in this list have the type MsgAppResp, MsgVoteResp, or + // MsgPreVoteResp. See the comment in raft.send for details. + msgsAfterAppend []pb.Message // the leader id lead uint64 @@ -275,10 +385,13 @@ type raft struct { // be proposed if the leader's applied index is greater than this // value. pendingConfIndex uint64 + // disableConfChangeValidation is Config.DisableConfChangeValidation, + // see there for details. + disableConfChangeValidation bool // an estimate of the size of the uncommitted tail of the Raft log. Used to // prevent unbounded log growth. Only maintained by the leader. Reset on // term changes. - uncommittedSize uint64 + uncommittedSize entryPayloadSize readOnly *readOnly @@ -302,6 +415,7 @@ type raft struct { // when raft changes its state to follower or candidate. 
randomizedElectionTimeout int disableProposalForwarding bool + stepDownOnRemoval bool tick func() step stepFunc @@ -319,48 +433,50 @@ func newRaft(c *Config) *raft { if err := c.validate(); err != nil { panic(err.Error()) } - raftlog := newLogWithSize(c.Storage, c.Logger, c.MaxCommittedSizePerReady) + raftlog := newLogWithSize(c.Storage, c.Logger, entryEncodingSize(c.MaxCommittedSizePerReady)) hs, cs, err := c.Storage.InitialState() if err != nil { panic(err) // TODO(bdarnell) } r := &raft{ - id: c.ID, - lead: None, - isLearner: false, - raftLog: raftlog, - maxMsgSize: c.MaxSizePerMsg, - maxUncommittedSize: c.MaxUncommittedEntriesSize, - prs: tracker.MakeProgressTracker(c.MaxInflightMsgs), - electionTimeout: c.ElectionTick, - heartbeatTimeout: c.HeartbeatTick, - logger: c.Logger, - checkQuorum: c.CheckQuorum, - preVote: c.PreVote, - readOnly: newReadOnly(c.ReadOnlyOption), - disableProposalForwarding: c.DisableProposalForwarding, - } - - cfg, prs, err := confchange.Restore(confchange.Changer{ - Tracker: r.prs, + id: c.ID, + lead: None, + isLearner: false, + raftLog: raftlog, + maxMsgSize: entryEncodingSize(c.MaxSizePerMsg), + maxUncommittedSize: entryPayloadSize(c.MaxUncommittedEntriesSize), + trk: tracker.MakeProgressTracker(c.MaxInflightMsgs, c.MaxInflightBytes), + electionTimeout: c.ElectionTick, + heartbeatTimeout: c.HeartbeatTick, + logger: c.Logger, + checkQuorum: c.CheckQuorum, + preVote: c.PreVote, + readOnly: newReadOnly(c.ReadOnlyOption), + disableProposalForwarding: c.DisableProposalForwarding, + disableConfChangeValidation: c.DisableConfChangeValidation, + stepDownOnRemoval: c.StepDownOnRemoval, + } + + cfg, trk, err := confchange.Restore(confchange.Changer{ + Tracker: r.trk, LastIndex: raftlog.lastIndex(), }, cs) if err != nil { panic(err) } - assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, trk)) if !IsEmptyHardState(hs) { r.loadState(hs) } if c.Applied > 0 { - raftlog.appliedTo(c.Applied) + raftlog.appliedTo(c.Applied, 0 /* size */) } r.becomeFollower(r.Term, None) var nodesStrs []string - for _, n := range r.prs.VoterNodes() { + for _, n := range r.trk.VoterNodes() { nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) } @@ -371,7 +487,7 @@ func newRaft(c *Config) *raft { func (r *raft) hasLeader() bool { return r.lead != None } -func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} } +func (r *raft) softState() SoftState { return SoftState{Lead: r.lead, RaftState: r.state} } func (r *raft) hardState() pb.HardState { return pb.HardState{ @@ -401,11 +517,11 @@ func (r *raft) send(m pb.Message) { // - MsgPreVoteResp: m.Term is the term received in the original // MsgPreVote if the pre-vote was granted, non-zero for the // same reasons MsgPreVote is - panic(fmt.Sprintf("term should be set when sending %s", m.Type)) + r.logger.Panicf("term should be set when sending %s", m.Type) } } else { if m.Term != 0 { - panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term)) + r.logger.Panicf("term should not be set when sending %s (was %d)", m.Type, m.Term) } // do not attach term to MsgProp, MsgReadIndex // proposals are a way to forward to the leader and @@ -415,7 +531,59 @@ func (r *raft) send(m pb.Message) { m.Term = r.Term } } - r.msgs = append(r.msgs, m) + if m.Type == pb.MsgAppResp || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVoteResp { + // If async storage writes are enabled, messages added to the msgs slice + // are allowed 
to be sent out before unstable state (e.g. log entry
+		// writes and election votes) have been durably synced to the local
+		// disk.
+		//
+		// For most message types, this is not an issue. However, response
+		// messages that relate to "voting" on either leader election or log
+		// appends require durability before they can be sent. It would be
+		// incorrect to publish a vote in an election before that vote has been
+		// synced to stable storage locally. Similarly, it would be incorrect to
+		// acknowledge a log append to the leader before that entry has been
+		// synced to stable storage locally.
+		//
+		// Per the Raft thesis, section 3.8 Persisted state and server restarts:
+		//
+		// > Raft servers must persist enough information to stable storage to
+		// > survive server restarts safely. In particular, each server persists
+		// > its current term and vote; this is necessary to prevent the server
+		// > from voting twice in the same term or replacing log entries from a
+		// > newer leader with those from a deposed leader. Each server also
+		// > persists new log entries before they are counted towards the entries’
+		// > commitment; this prevents committed entries from being lost or
+		// > “uncommitted” when servers restart
+		//
+		// To enforce this durability requirement, these response messages are
+		// queued to be sent out as soon as the current collection of unstable
+		// state (the state that the response message was predicated upon) has
+		// been durably persisted. This unstable state may have already been
+		// passed to a Ready struct whose persistence is in progress or may be
+		// waiting for the next Ready struct to begin being written to Storage.
+		// These messages must wait for all of this state to be durable before
+		// being published.
+		//
+		// Rejected responses (m.Reject == true) present an interesting case
+		// where the durability requirement is less clear-cut. A rejection may
+		// be predicated upon unstable state. For instance, a node may reject a
+		// vote for one peer because it has already begun syncing its vote for
+		// another peer. Or it may reject a vote from one peer because it has
+		// unstable log entries that indicate that the peer is behind on its
+		// log. In these cases, it is likely safe to send out the rejection
+		// response immediately without compromising safety in the presence of a
+		// server restart. However, because these rejections are rare and
+		// because the safety of such behavior has not been formally verified,
+		// we err on the side of safety and omit a `&& !m.Reject` condition
+		// above.
+		r.msgsAfterAppend = append(r.msgsAfterAppend, m)
+	} else {
+		if m.To == r.id {
+			r.logger.Panicf("message should not be self-addressed when sending %s", m.Type)
+		}
+		r.msgs = append(r.msgs, m)
+	}
 }
 
 // sendAppend sends an append RPC with new entries (if any) and the
@@ -430,15 +598,26 @@ func (r *raft) sendAppend(to uint64) {
 	// ("empty" messages are useful to convey updated Commit indexes, but
 	// are undesirable when we're sending multiple messages in a batch).
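
Once the unstable state a response was predicated on has been synced, the queued messages are fanned out. A sketch of that delivery step, under the same assumed API shapes as the append-thread sketch earlier; note that msgsAfterAppend may contain self-addressed messages, which must be stepped back into the node rather than sent:

import (
	"context"

	"go.etcd.io/raft/v3"
	"go.etcd.io/raft/v3/raftpb"
)

// deliverAfterAppend hands out response messages attached to a completed,
// durable storage write: self-addressed ones (a leader's self-MsgAppResp, a
// candidate's self-vote) are stepped locally, the rest go over the network.
func deliverAfterAppend(ctx context.Context, selfID uint64, n raft.Node, send func(raftpb.Message), resps []raftpb.Message) {
	for _, m := range resps {
		if m.To == selfID {
			_ = n.Step(ctx, m) // local responses re-enter the state machine
		} else {
			send(m)
		}
	}
}
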
func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool { - pr := r.prs.Progress[to] + pr := r.trk.Progress[to] if pr.IsPaused() { return false } - m := pb.Message{} - m.To = to - term, errt := r.raftLog.term(pr.Next - 1) - ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) + lastIndex, nextIndex := pr.Next-1, pr.Next + lastTerm, errt := r.raftLog.term(lastIndex) + + var ents []pb.Entry + var erre error + // In a throttled StateReplicate only send empty MsgApp, to ensure progress. + // Otherwise, if we had a full Inflights and all inflight messages were in + // fact dropped, replication to that follower would stall. Instead, an empty + // MsgApp will eventually reach the follower (heartbeats responses prompt the + // leader to send an append), allowing it to be acked or rejected, both of + // which will clear out Inflights. + if pr.State != tracker.StateReplicate || !pr.Inflights.Full() { + ents, erre = r.raftLog.entries(nextIndex, r.maxMsgSize) + } + if len(ents) == 0 && !sendIfEmpty { return false } @@ -449,7 +628,6 @@ func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool { return false } - m.Type = pb.MsgSnap snapshot, err := r.raftLog.snapshot() if err != nil { if err == ErrSnapshotTemporarilyUnavailable { @@ -461,33 +639,29 @@ func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool { if IsEmptySnap(snapshot) { panic("need non-empty snapshot") } - m.Snapshot = snapshot sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) pr.BecomeSnapshot(sindex) r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) - } else { - m.Type = pb.MsgApp - m.Index = pr.Next - 1 - m.LogTerm = term - m.Entries = ents - m.Commit = r.raftLog.committed - if n := len(m.Entries); n != 0 { - switch pr.State { - // optimistically increase the next when in StateReplicate - case tracker.StateReplicate: - last := m.Entries[n-1].Index - pr.OptimisticUpdate(last) - pr.Inflights.Add(last) - case tracker.StateProbe: - pr.ProbeSent = true - default: - r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) - } - } + + r.send(pb.Message{To: to, Type: pb.MsgSnap, Snapshot: &snapshot}) + return true } - r.send(m) + + // Send the actual MsgApp otherwise, and update the progress accordingly. + if err := pr.UpdateOnEntriesSend(len(ents), uint64(payloadsSize(ents)), nextIndex); err != nil { + r.logger.Panicf("%x: %v", r.id, err) + } + // NB: pr has been updated, but we make sure to only use its old values below. + r.send(pb.Message{ + To: to, + Type: pb.MsgApp, + Index: lastIndex, + LogTerm: lastTerm, + Entries: ents, + Commit: r.raftLog.committed, + }) return true } @@ -499,7 +673,7 @@ func (r *raft) sendHeartbeat(to uint64, ctx []byte) { // or it might not have all the committed entries. // The leader MUST NOT forward the follower's commit to // an unmatched index. - commit := min(r.prs.Progress[to].Match, r.raftLog.committed) + commit := min(r.trk.Progress[to].Match, r.raftLog.committed) m := pb.Message{ To: to, Type: pb.MsgHeartbeat, @@ -511,9 +685,9 @@ func (r *raft) sendHeartbeat(to uint64, ctx []byte) { } // bcastAppend sends RPC, with entries to all peers that are not up-to-date -// according to the progress recorded in r.prs. +// according to the progress recorded in r.trk. 
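
sendHeartbeat above computes min(pr.Match, r.raftLog.committed); spelled out as a worked example (illustrative only):

// heartbeatCommit is the clamped commit index a heartbeat may carry. With
// match=7 and committed=10 it returns 7: the leader must not forward commit
// index 10 to a follower that is only known to have matched up to index 7.
func heartbeatCommit(match, committed uint64) uint64 {
	if match < committed {
		return match
	}
	return committed
}
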
func (r *raft) bcastAppend() { - r.prs.Visit(func(id uint64, _ *tracker.Progress) { + r.trk.Visit(func(id uint64, _ *tracker.Progress) { if id == r.id { return } @@ -532,7 +706,7 @@ func (r *raft) bcastHeartbeat() { } func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { - r.prs.Visit(func(id uint64, _ *tracker.Progress) { + r.trk.Visit(func(id uint64, _ *tracker.Progress) { if id == r.id { return } @@ -540,50 +714,46 @@ func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { }) } -func (r *raft) advance(rd Ready) { - r.reduceUncommittedSize(rd.CommittedEntries) - - // If entries were applied (or a snapshot), update our cursor for - // the next Ready. Note that if the current HardState contains a - // new Commit index, this does not mean that we're also applying - // all of the new entries due to commit pagination by size. - if newApplied := rd.appliedCursor(); newApplied > 0 { - oldApplied := r.raftLog.applied - r.raftLog.appliedTo(newApplied) - - if r.prs.Config.AutoLeave && oldApplied <= r.pendingConfIndex && newApplied >= r.pendingConfIndex && r.state == StateLeader { - // If the current (and most recent, at least for this leader's term) - // configuration should be auto-left, initiate that now. We use a - // nil Data which unmarshals into an empty ConfChangeV2 and has the - // benefit that appendEntry can never refuse it based on its size - // (which registers as zero). - ent := pb.Entry{ - Type: pb.EntryConfChangeV2, - Data: nil, - } - // There's no way in which this proposal should be able to be rejected. - if !r.appendEntry(ent) { - panic("refused un-refusable auto-leaving ConfChangeV2") - } - r.pendingConfIndex = r.raftLog.lastIndex() - r.logger.Infof("initiating automatic transition out of joint configuration %s", r.prs.Config) +func (r *raft) appliedTo(index uint64, size entryEncodingSize) { + oldApplied := r.raftLog.applied + newApplied := max(index, oldApplied) + r.raftLog.appliedTo(newApplied, size) + + if r.trk.Config.AutoLeave && newApplied >= r.pendingConfIndex && r.state == StateLeader { + // If the current (and most recent, at least for this leader's term) + // configuration should be auto-left, initiate that now. We use a + // nil Data which unmarshals into an empty ConfChangeV2 and has the + // benefit that appendEntry can never refuse it based on its size + // (which registers as zero). + m, err := confChangeToMsg(nil) + if err != nil { + panic(err) + } + // NB: this proposal can't be dropped due to size, but can be + // dropped if a leadership transfer is in progress. We'll keep + // checking this condition on each applied entry, so either the + // leadership transfer will succeed and the new leader will leave + // the joint configuration, or the leadership transfer will fail, + // and we will propose the config change on the next advance. + if err := r.Step(m); err != nil { + r.logger.Debugf("not initiating automatic transition out of joint configuration %s: %v", r.trk.Config, err) + } else { + r.logger.Infof("initiating automatic transition out of joint configuration %s", r.trk.Config) } } +} - if len(rd.Entries) > 0 { - e := rd.Entries[len(rd.Entries)-1] - r.raftLog.stableTo(e.Index, e.Term) - } - if !IsEmptySnap(rd.Snapshot) { - r.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) - } +func (r *raft) appliedSnap(snap *pb.Snapshot) { + index := snap.Metadata.Index + r.raftLog.stableSnapTo(index) + r.appliedTo(index, 0 /* size */) } // maybeCommit attempts to advance the commit index. 
Returns true if
 // the commit index changed (in which case the caller should call
 // r.bcastAppend).
 func (r *raft) maybeCommit() bool {
-	mci := r.prs.Committed()
+	mci := r.trk.Committed()
 	return r.raftLog.maybeCommit(mci, r.Term)
 }
 
@@ -600,12 +770,12 @@ func (r *raft) reset(term uint64) {
 
 	r.abortLeaderTransfer()
 
-	r.prs.ResetVotes()
-	r.prs.Visit(func(id uint64, pr *tracker.Progress) {
+	r.trk.ResetVotes()
+	r.trk.Visit(func(id uint64, pr *tracker.Progress) {
 		*pr = tracker.Progress{
 			Match:     0,
 			Next:      r.raftLog.lastIndex() + 1,
-			Inflights: tracker.NewInflights(r.prs.MaxInflight),
+			Inflights: tracker.NewInflights(r.trk.MaxInflight, r.trk.MaxInflightBytes),
 			IsLearner: pr.IsLearner,
 		}
 		if id == r.id {
@@ -626,7 +796,7 @@ func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) {
 	}
 	// Track the size of this uncommitted proposal.
 	if !r.increaseUncommittedSize(es) {
-		r.logger.Debugf(
+		r.logger.Warningf(
 			"%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal",
 			r.id,
 		)
@@ -635,9 +805,17 @@ func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) {
 	// use latest "last" index after truncate/append
 	li = r.raftLog.append(es...)
-	r.prs.Progress[r.id].MaybeUpdate(li)
-	// Regardless of maybeCommit's return, our caller will call bcastAppend.
-	r.maybeCommit()
+	// The leader needs to self-ack the entries just appended once they have
+	// been durably persisted (since it doesn't send an MsgApp to itself). This
+	// response message will be added to msgsAfterAppend and delivered back to
+	// this node after these entries have been written to stable storage. When
+	// handled, this is roughly equivalent to:
+	//
+	//  r.trk.Progress[r.id].MaybeUpdate(e.Index)
+	//  if r.maybeCommit() {
+	//  	r.bcastAppend()
+	//  }
+	r.send(pb.Message{To: r.id, Type: pb.MsgAppResp, Index: li})
 	return true
 }
 
@@ -647,7 +825,9 @@ func (r *raft) tickElection() {
 
 	if r.promotable() && r.pastElectionTimeout() {
 		r.electionElapsed = 0
-		r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
+		if err := r.Step(pb.Message{From: r.id, Type: pb.MsgHup}); err != nil {
+			r.logger.Debugf("error occurred during election: %v", err)
+		}
 	}
 }
 
@@ -659,7 +839,9 @@ func (r *raft) tickHeartbeat() {
 	if r.electionElapsed >= r.electionTimeout {
 		r.electionElapsed = 0
 		if r.checkQuorum {
-			r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
+			if err := r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}); err != nil {
+				r.logger.Debugf("error occurred during quorum check: %v", err)
+			}
 		}
 		// If current leader cannot transfer leadership in electionTimeout, it becomes leader again.
 		if r.state == StateLeader && r.leadTransferee != None {
@@ -673,7 +855,9 @@ func (r *raft) tickHeartbeat() {
 
 	if r.heartbeatElapsed >= r.heartbeatTimeout {
 		r.heartbeatElapsed = 0
-		r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
+		if err := r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}); err != nil {
+			r.logger.Debugf("error occurred during sending heartbeat: %v", err)
+		}
 	}
 }
 
@@ -708,7 +892,7 @@ func (r *raft) becomePreCandidate() {
 	// but doesn't change anything else. In particular it does not increase
 	// r.Term or change r.Vote.
 	r.step = stepCandidate
-	r.prs.ResetVotes()
+	r.trk.ResetVotes()
 	r.tick = r.tickElection
 	r.lead = None
 	r.state = StatePreCandidate
@@ -729,7 +913,11 @@ func (r *raft) becomeLeader() {
 	// (perhaps after having received a snapshot as a result). The leader is
 	// trivially in this state. Note that r.reset() has initialized this
 	// progress with the last index already.
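
tickElection and tickHeartbeat above are fired by the application calling Node.Tick on a wall-clock interval. The conventional driver loop, shown here for the synchronous mode in which Advance is still used, looks roughly like this (the 100ms interval is illustrative):

import (
	"time"

	"go.etcd.io/raft/v3"
)

func runDriver(n raft.Node) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			n.Tick() // advances the logical clock behind election/heartbeat timeouts
		case rd := <-n.Ready():
			// Persist rd.HardState, rd.Entries and rd.Snapshot, send
			// rd.Messages, apply rd.CommittedEntries, and then:
			n.Advance()
		}
	}
}
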
- r.prs.Progress[r.id].BecomeReplicate() + pr := r.trk.Progress[r.id] + pr.BecomeReplicate() + // The leader always has RecentActive == true; MsgCheckQuorum makes sure to + // preserve this. + pr.RecentActive = true // Conservatively set the pendingConfIndex to the last index in the // log. There may or may not be a pending config change, but it's @@ -743,11 +931,10 @@ func (r *raft) becomeLeader() { // This won't happen because we just called reset() above. r.logger.Panic("empty entry was dropped") } - // As a special case, don't count the initial empty entry towards the - // uncommitted log quota. This is because we want to preserve the - // behavior of allowing one entry larger than quota if the current - // usage is zero. - r.reduceUncommittedSize([]pb.Entry{emptyEnt}) + // The payloadSize of an empty entry is 0 (see TestPayloadSizeOfEmptyEntry), + // so the preceding log append does not count against the uncommitted log + // quota of the new leader. In other words, after the call to appendEntry, + // r.uncommittedSize is still 0. r.logger.Infof("%x became leader at term %d", r.id, r.Term) } @@ -761,12 +948,8 @@ func (r *raft) hup(t CampaignType) { r.logger.Warningf("%x is unpromotable and can not campaign", r.id) return } - ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit) - if err != nil { - r.logger.Panicf("unexpected error getting unapplied entries (%v)", err) - } - if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied { - r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n) + if r.hasUnappliedConfChanges() { + r.logger.Warningf("%x cannot campaign at term %d since there are still pending configuration changes to apply", r.id, r.Term) return } @@ -774,6 +957,37 @@ func (r *raft) hup(t CampaignType) { r.campaign(t) } +// errBreak is a sentinel error used to break a callback-based loop. +var errBreak = errors.New("break") + +func (r *raft) hasUnappliedConfChanges() bool { + if r.raftLog.applied >= r.raftLog.committed { // in fact applied == committed + return false + } + found := false + // Scan all unapplied committed entries to find a config change. Paginate the + // scan, to avoid a potentially unlimited memory spike. + lo, hi := r.raftLog.applied+1, r.raftLog.committed+1 + // Reuse the maxApplyingEntsSize limit because it is used for similar purposes + // (limiting the read of unapplied committed entries) when raft sends entries + // via the Ready struct for application. + // TODO(pavelkalinnikov): find a way to budget memory/bandwidth for this scan + // outside the raft package. + pageSize := r.raftLog.maxApplyingEntsSize + if err := r.raftLog.scan(lo, hi, pageSize, func(ents []pb.Entry) error { + for i := range ents { + if ents[i].Type == pb.EntryConfChange || ents[i].Type == pb.EntryConfChangeV2 { + found = true + return errBreak + } + } + return nil + }); err != nil && err != errBreak { + r.logger.Panicf("error scanning unapplied entries [%d, %d): %v", lo, hi, err) + } + return found +} + // campaign transitions the raft instance to candidate state. This must only be // called after verifying that this is a legitimate transition. 
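
The errBreak sentinel used by hasUnappliedConfChanges above is a general Go idiom for stopping a callback-driven scan early. A self-contained sketch of the same shape:

import "errors"

var errStop = errors.New("stop")

// anyMatch visits elements through a callback-based iterator (compare
// raftLog.scan above) and stops at the first element satisfying pred.
func anyMatch(xs []int, pred func(int) bool) bool {
	found := false
	if err := visit(xs, func(x int) error {
		if pred(x) {
			found = true
			return errStop // sentinel: "stop iterating", not a real failure
		}
		return nil
	}); err != nil && err != errStop {
		panic(err) // a genuine error from the iterator
	}
	return found
}

func visit(xs []int, f func(int) error) error {
	for _, x := range xs {
		if err := f(x); err != nil {
			return err
		}
	}
	return nil
}
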
func (r *raft) campaign(t CampaignType) { @@ -794,19 +1008,9 @@ func (r *raft) campaign(t CampaignType) { voteMsg = pb.MsgVote term = r.Term } - if _, _, res := r.poll(r.id, voteRespMsgType(voteMsg), true); res == quorum.VoteWon { - // We won the election after voting for ourselves (which must mean that - // this is a single-node cluster). Advance to the next state. - if t == campaignPreElection { - r.campaign(campaignElection) - } else { - r.becomeLeader() - } - return - } var ids []uint64 { - idMap := r.prs.Voters.IDs() + idMap := r.trk.Voters.IDs() ids = make([]uint64, 0, len(idMap)) for id := range idMap { ids = append(ids, id) @@ -815,6 +1019,12 @@ func (r *raft) campaign(t CampaignType) { } for _, id := range ids { if id == r.id { + // The candidate votes for itself and should account for this self + // vote once the vote has been durably persisted (since it doesn't + // send a MsgVote to itself). This response message will be added to + // msgsAfterAppend and delivered back to this node after the vote + // has been written to stable storage. + r.send(pb.Message{To: id, Term: term, Type: voteRespMsgType(voteMsg)}) continue } r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d", @@ -824,7 +1034,7 @@ func (r *raft) campaign(t CampaignType) { if t == campaignTransfer { ctx = []byte(t) } - r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx}) + r.send(pb.Message{To: id, Term: term, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx}) } } @@ -834,8 +1044,8 @@ func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int, rejected } else { r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term) } - r.prs.RecordVote(id, v) - return r.prs.TallyVotes() + r.trk.RecordVote(id, v) + return r.trk.TallyVotes() } func (r *raft) Step(m pb.Message) error { @@ -905,6 +1115,21 @@ func (r *raft) Step(m pb.Message) error { r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true}) + } else if m.Type == pb.MsgStorageAppendResp { + if m.Index != 0 { + // Don't consider the appended log entries to be stable because + // they may have been overwritten in the unstable log during a + // later term. See the comment in newStorageAppendResp for more + // about this race. + r.logger.Infof("%x [term: %d] ignored entry appends from a %s message with lower term [term: %d]", + r.id, r.Term, m.Type, m.Term) + } + if m.Snapshot != nil { + // Even if the snapshot applied under a different term, its + // application is still valid. Snapshots carry committed + // (term-independent) state. 
+ r.appliedSnap(m.Snapshot) + } } else { // ignore other cases r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]", @@ -921,6 +1146,21 @@ func (r *raft) Step(m pb.Message) error { r.hup(campaignElection) } + case pb.MsgStorageAppendResp: + if m.Index != 0 { + r.raftLog.stableTo(m.Index, m.LogTerm) + } + if m.Snapshot != nil { + r.appliedSnap(m.Snapshot) + } + + case pb.MsgStorageApplyResp: + if len(m.Entries) > 0 { + index := m.Entries[len(m.Entries)-1].Index + r.appliedTo(index, entsSize(m.Entries)) + r.reduceUncommittedSize(payloadsSize(m.Entries)) + } + case pb.MsgVote, pb.MsgPreVote: // We can vote if this is a repeat of a vote we've already cast... canVote := r.Vote == m.From || @@ -989,22 +1229,13 @@ func stepLeader(r *raft, m pb.Message) error { r.bcastHeartbeat() return nil case pb.MsgCheckQuorum: - // The leader should always see itself as active. As a precaution, handle - // the case in which the leader isn't in the configuration any more (for - // example if it just removed itself). - // - // TODO(tbg): I added a TODO in removeNode, it doesn't seem that the - // leader steps down when removing itself. I might be missing something. - if pr := r.prs.Progress[r.id]; pr != nil { - pr.RecentActive = true - } - if !r.prs.QuorumActive() { + if !r.trk.QuorumActive() { r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id) r.becomeFollower(r.Term, None) } // Mark everyone (but ourselves) as inactive in preparation for the next // CheckQuorum. - r.prs.Visit(func(id uint64, pr *tracker.Progress) { + r.trk.Visit(func(id uint64, pr *tracker.Progress) { if id != r.id { pr.RecentActive = false } @@ -1014,7 +1245,7 @@ func stepLeader(r *raft, m pb.Message) error { if len(m.Entries) == 0 { r.logger.Panicf("%x stepped empty MsgProp", r.id) } - if r.prs.Progress[r.id] == nil { + if r.trk.Progress[r.id] == nil { // If we are not currently a member of the range (i.e. this node // was removed from the configuration while serving as leader), // drop any new proposals. 
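
The MsgStorageApplyResp case above closes the loop opened by MsgStorageApply: the applied index and the uncommitted-size quota only move once the apply thread reports back. A sketch of that thread (the state-machine hook and delivery function are assumptions); unlike the append thread, no sync is needed:

import "go.etcd.io/raft/v3/raftpb"

// runApplyThread applies committed entries in order, then delivers the
// attached responses, typically a single MsgStorageApplyResp addressed back
// to the raft node.
func runApplyThread(msgs <-chan raftpb.Message, apply func(raftpb.Entry), deliver func(raftpb.Message)) {
	for m := range msgs {
		for _, e := range m.Entries {
			apply(e)
		}
		for _, resp := range m.Responses {
			deliver(resp)
		}
	}
}
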
@@ -1043,20 +1274,20 @@ func stepLeader(r *raft, m pb.Message) error { } if cc != nil { alreadyPending := r.pendingConfIndex > r.raftLog.applied - alreadyJoint := len(r.prs.Config.Voters[1]) > 0 + alreadyJoint := len(r.trk.Config.Voters[1]) > 0 wantsLeaveJoint := len(cc.AsV2().Changes) == 0 - var refused string + var failedCheck string if alreadyPending { - refused = fmt.Sprintf("possible unapplied conf change at index %d (applied to %d)", r.pendingConfIndex, r.raftLog.applied) + failedCheck = fmt.Sprintf("possible unapplied conf change at index %d (applied to %d)", r.pendingConfIndex, r.raftLog.applied) } else if alreadyJoint && !wantsLeaveJoint { - refused = "must transition out of joint config first" + failedCheck = "must transition out of joint config first" } else if !alreadyJoint && wantsLeaveJoint { - refused = "not in joint state; refusing empty conf change" + failedCheck = "not in joint state; refusing empty conf change" } - if refused != "" { - r.logger.Infof("%x ignoring conf change %v at config %s: %s", r.id, cc, r.prs.Config, refused) + if failedCheck != "" && !r.disableConfChangeValidation { + r.logger.Infof("%x ignoring conf change %v at config %s: %s", r.id, cc, r.trk.Config, failedCheck) m.Entries[i] = pb.Entry{Type: pb.EntryNormal} } else { r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1 @@ -1071,7 +1302,7 @@ func stepLeader(r *raft, m pb.Message) error { return nil case pb.MsgReadIndex: // only one voting member (the leader) in the cluster - if r.prs.IsSingleton() { + if r.trk.IsSingleton() { if resp := r.responseToReadIndexReq(m, r.raftLog.committed); resp.To != None { r.send(resp) } @@ -1088,16 +1319,21 @@ func stepLeader(r *raft, m pb.Message) error { sendMsgReadIndexResponse(r, m) return nil + case pb.MsgForgetLeader: + return nil // noop on leader } // All other message types require a progress for m.From (pr). - pr := r.prs.Progress[m.From] + pr := r.trk.Progress[m.From] if pr == nil { r.logger.Debugf("%x no progress available for %x", r.id, m.From) return nil } switch m.Type { case pb.MsgAppResp: + // NB: this code path is also hit from (*raft).advance, where the leader steps + // an MsgAppResp to acknowledge the appended entries in the last Ready. + pr.RecentActive = true if m.Reject { @@ -1148,7 +1384,7 @@ func stepLeader(r *raft, m pb.Message) error { // the rejection's log term. If a probe at one of these indexes // succeeded, its log term at that index would match the leader's, // i.e. 3 or 5 in this example. But the follower already told the - // leader that it is still at term 2 at index 9, and since the + // leader that it is still at term 2 at index 6, and since the // log term only ever goes up (within a log), this is a contradiction. // // At index 1, however, the leader can draw no such conclusion, @@ -1219,7 +1455,7 @@ func stepLeader(r *raft, m pb.Message) error { // 7, the rejection points it at the end of the follower's log // which is at a higher log term than the actually committed // log. 
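
The rejection handling discussed above boils down to a backwards search for the largest (index, term) pair at or below the rejection point. A toy version over a dense array of terms (a sketch only; the package's findConflictByTerm works on the real log structure and, as the change just below shows, now returns the term as well):

// findConflictByTerm returns the largest index <= index whose entry term is
// <= term, together with that term; in this toy log, terms[i] holds the term
// of the entry at index i+1.
func findConflictByTerm(terms []uint64, index, term uint64) (uint64, uint64) {
	if n := uint64(len(terms)); index > n {
		index = n // clamp to the last existing entry
	}
	for i := index; i >= 1; i-- {
		if t := terms[i-1]; t <= term {
			return i, t
		}
	}
	return 0, 0 // nothing qualifies; probe from the very beginning
}
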
-				nextProbeIdx = r.raftLog.findConflictByTerm(m.RejectHint, m.LogTerm)
+				nextProbeIdx, _ = r.raftLog.findConflictByTerm(m.RejectHint, m.LogTerm)
 			}
 			if pr.MaybeDecrTo(m.Index, nextProbeIdx) {
 				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
@@ -1230,14 +1466,25 @@ func stepLeader(r *raft, m pb.Message) error {
 		} else {
 			oldPaused := pr.IsPaused()
-			if pr.MaybeUpdate(m.Index) {
+			// We want to update our tracking if the response updates our
+			// matched index or if the response can move a probing peer back
+			// into StateReplicate (see heartbeat_rep_recovers_from_probing.txt
+			// for an example of the latter case).
+			// NB: the same does not make sense for StateSnapshot - if `m.Index`
+			// equals pr.Match we know we don't have m.Index+1 in our log, so
+			// moving back to replicating state is not useful; besides
+			// pr.PendingSnapshot would prevent it.
+			if pr.MaybeUpdate(m.Index) || (pr.Match == m.Index && pr.State == tracker.StateProbe) {
 				switch {
 				case pr.State == tracker.StateProbe:
 					pr.BecomeReplicate()
-				case pr.State == tracker.StateSnapshot && pr.Match >= pr.PendingSnapshot:
-					// TODO(tbg): we should also enter this branch if a snapshot is
-					// received that is below pr.PendingSnapshot but which makes it
-					// possible to use the log again.
+				case pr.State == tracker.StateSnapshot && pr.Match+1 >= r.raftLog.firstIndex():
+					// Note that we don't take into account PendingSnapshot to
+					// enter this branch. No matter at which index a snapshot
+					// was actually applied, as long as this allows catching up
+					// the follower from the log, we will accept it. This gives
+					// systems more flexibility in how they implement snapshots;
+					// see the comments on PendingSnapshot.
 					r.logger.Debugf("%x recovered from needing snapshot, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
 					// Transition back to replicating state via probing state
 					// (which takes the snapshot into account). If we didn't
@@ -1266,7 +1513,9 @@ func stepLeader(r *raft, m pb.Message) error {
 				// replicate, or when freeTo() covers multiple messages). If
 				// we have more entries to send, send as many messages as we
 				// can (without sending empty messages for the commit index)
-				for r.maybeSendAppend(m.From, false) {
+				if r.id != m.From {
+					for r.maybeSendAppend(m.From, false /* sendIfEmpty */) {
+					}
 				}
 				// Transfer leadership is in progress.
 				if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
@@ -1277,13 +1526,22 @@ func stepLeader(r *raft, m pb.Message) error {
 	case pb.MsgHeartbeatResp:
 		pr.RecentActive = true
-		pr.ProbeSent = false
+		pr.MsgAppFlowPaused = false
 
-		// free one slot for the full inflights window to allow progress.
-		if pr.State == tracker.StateReplicate && pr.Inflights.Full() {
-			pr.Inflights.FreeFirstOne()
-		}
-		if pr.Match < r.raftLog.lastIndex() {
+		// NB: if the follower is paused (full Inflights), this will still send an
+		// empty append, allowing it to recover from situations in which all the
+		// messages that filled up Inflights in the first place were dropped. Note
+		// also that the outgoing heartbeat already communicated the commit index.
+		//
+		// If the follower is fully caught up but also in StateProbe (as can happen
+		// if ReportUnreachable was called), we also want to send an append (it will
+		// be empty) to allow the follower to transition back to StateReplicate once
+		// it responds.
+		//
+		// Note that StateSnapshot typically satisfies pr.Match < lastIndex, but
+		// `pr.Paused()` is always true for StateSnapshot, so sendAppend is a
+		// no-op.
+ if pr.Match < r.raftLog.lastIndex() || pr.State == tracker.StateProbe { r.sendAppend(m.From) } @@ -1291,7 +1549,7 @@ func stepLeader(r *raft, m pb.Message) error { return nil } - if r.prs.Voters.VoteResult(r.readOnly.recvAck(m.From, m.Context)) != quorum.VoteWon { + if r.trk.Voters.VoteResult(r.readOnly.recvAck(m.From, m.Context)) != quorum.VoteWon { return nil } @@ -1305,10 +1563,6 @@ func stepLeader(r *raft, m pb.Message) error { if pr.State != tracker.StateSnapshot { return nil } - // TODO(tbg): this code is very similar to the snapshot handling in - // MsgAppResp above. In fact, the code there is more correct than the - // code here and should likely be updated to match (or even better, the - // logic pulled into a newly created Progress state machine handler). if !m.Reject { pr.BecomeProbe() r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr) @@ -1322,7 +1576,7 @@ func stepLeader(r *raft, m pb.Message) error { // If snapshot finish, wait for the MsgAppResp from the remote node before sending // out the next MsgApp. // If snapshot failure, wait for a heartbeat interval before next try - pr.ProbeSent = true + pr.MsgAppFlowPaused = true case pb.MsgUnreachable: // During optimistic replication, if the remote becomes unreachable, // there is huge probability that a MsgApp is lost. @@ -1443,6 +1697,15 @@ func stepFollower(r *raft, m pb.Message) error { } m.To = r.lead r.send(m) + case pb.MsgForgetLeader: + if r.readOnly.option == ReadOnlyLeaseBased { + r.logger.Error("ignoring MsgForgetLeader due to ReadOnlyLeaseBased") + return nil + } + if r.lead != None { + r.logger.Infof("%x forgetting leader %x at term %d", r.id, r.lead, r.Term) + r.lead = None + } case pb.MsgTimeoutNow: r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From) // Leadership transfers never use pre-vote even if r.preVote is true; we @@ -1471,37 +1734,39 @@ func (r *raft) handleAppendEntries(m pb.Message) { r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) return } - if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok { r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex}) - } else { - r.logger.Debugf("%x [logterm: %d, index: %d] rejected MsgApp [logterm: %d, index: %d] from %x", - r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From) - - // Return a hint to the leader about the maximum index and term that the - // two logs could be divergent at. Do this by searching through the - // follower's log for the maximum (index, term) pair with a term <= the - // MsgApp's LogTerm and an index <= the MsgApp's Index. This can help - // skip all indexes in the follower's uncommitted tail with terms - // greater than the MsgApp's LogTerm. - // - // See the other caller for findConflictByTerm (in stepLeader) for a much - // more detailed explanation of this mechanism. 
- hintIndex := min(m.Index, r.raftLog.lastIndex()) - hintIndex = r.raftLog.findConflictByTerm(hintIndex, m.LogTerm) - hintTerm, err := r.raftLog.term(hintIndex) - if err != nil { - panic(fmt.Sprintf("term(%d) must be valid, but got %v", hintIndex, err)) - } - r.send(pb.Message{ - To: m.From, - Type: pb.MsgAppResp, - Index: m.Index, - Reject: true, - RejectHint: hintIndex, - LogTerm: hintTerm, - }) + return } + r.logger.Debugf("%x [logterm: %d, index: %d] rejected MsgApp [logterm: %d, index: %d] from %x", + r.id, r.raftLog.zeroTermOnOutOfBounds(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From) + + // Our log does not match the leader's at index m.Index. Return a hint to the + // leader - a guess on the maximal (index, term) at which the logs match. Do + // this by searching through the follower's log for the maximum (index, term) + // pair with a term <= the MsgApp's LogTerm and an index <= the MsgApp's + // Index. This can help skip all indexes in the follower's uncommitted tail + // with terms greater than the MsgApp's LogTerm. + // + // See the other caller for findConflictByTerm (in stepLeader) for a much more + // detailed explanation of this mechanism. + + // NB: m.Index >= raftLog.committed by now (see the early return above), and + // raftLog.lastIndex() >= raftLog.committed by invariant, so min of the two is + // also >= raftLog.committed. Hence, the findConflictByTerm argument is within + // the valid interval, which then will return a valid (index, term) pair with + // a non-zero term (unless the log is empty). However, it is safe to send a zero + // LogTerm in this response in any case, so we don't verify it here. + hintIndex := min(m.Index, r.raftLog.lastIndex()) + hintIndex, hintTerm := r.raftLog.findConflictByTerm(hintIndex, m.LogTerm) + r.send(pb.Message{ + To: m.From, + Type: pb.MsgAppResp, + Index: m.Index, + Reject: true, + RejectHint: hintIndex, + LogTerm: hintTerm, + }) } func (r *raft) handleHeartbeat(m pb.Message) { @@ -1510,8 +1775,14 @@ func (r *raft) handleHeartbeat(m pb.Message) { } func (r *raft) handleSnapshot(m pb.Message) { - sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term - if r.restore(m.Snapshot) { + // MsgSnap messages should always carry a non-nil Snapshot, but err on the + // side of safety and treat a nil Snapshot as a zero-valued Snapshot. + var s pb.Snapshot + if m.Snapshot != nil { + s = *m.Snapshot + } + sindex, sterm := s.Metadata.Index, s.Metadata.Term + if r.restore(s) { r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]", r.id, r.raftLog.committed, sindex, sterm) r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()}) @@ -1585,9 +1856,9 @@ func (r *raft) restore(s pb.Snapshot) bool { r.raftLog.restore(s) // Reset the configuration and add the (potentially updated) peers in anew. 
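
The nil guard added in handleSnapshot above generalizes to any optional protobuf pointer field. A sketch:

import "go.etcd.io/raft/v3/raftpb"

// snapshotOrEmpty treats a missing Snapshot pointer as a zero-valued
// snapshot; IsEmptySnap then classifies it as empty (Metadata.Index == 0),
// so a malformed MsgSnap degrades into a rejected restore instead of a
// nil-pointer panic.
func snapshotOrEmpty(m raftpb.Message) raftpb.Snapshot {
	if m.Snapshot != nil {
		return *m.Snapshot
	}
	return raftpb.Snapshot{}
}
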
- r.prs = tracker.MakeProgressTracker(r.prs.MaxInflight) - cfg, prs, err := confchange.Restore(confchange.Changer{ - Tracker: r.prs, + r.trk = tracker.MakeProgressTracker(r.trk.MaxInflight, r.trk.MaxInflightBytes) + cfg, trk, err := confchange.Restore(confchange.Changer{ + Tracker: r.trk, LastIndex: r.raftLog.lastIndex(), }, cs) @@ -1597,9 +1868,9 @@ func (r *raft) restore(s pb.Snapshot) bool { panic(fmt.Sprintf("unable to restore config %+v: %s", cs, err)) } - assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, trk)) - pr := r.prs.Progress[r.id] + pr := r.trk.Progress[r.id] pr.MaybeUpdate(pr.Next - 1) // TODO(tbg): this is untested and likely unneeded r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] restored snapshot [index: %d, term: %d]", @@ -1610,14 +1881,14 @@ func (r *raft) restore(s pb.Snapshot) bool { // promotable indicates whether state machine can be promoted to leader, // which is true when its own id is in progress list. func (r *raft) promotable() bool { - pr := r.prs.Progress[r.id] - return pr != nil && !pr.IsLearner && !r.raftLog.hasPendingSnapshot() + pr := r.trk.Progress[r.id] + return pr != nil && !pr.IsLearner && !r.raftLog.hasNextOrInProgressSnapshot() } func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState { - cfg, prs, err := func() (tracker.Config, tracker.ProgressMap, error) { + cfg, trk, err := func() (tracker.Config, tracker.ProgressMap, error) { changer := confchange.Changer{ - Tracker: r.prs, + Tracker: r.trk, LastIndex: r.raftLog.lastIndex(), } if cc.LeaveJoint() { @@ -1633,7 +1904,7 @@ func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState { panic(err) } - return r.switchToConfig(cfg, prs) + return r.switchToConfig(cfg, trk) } // switchToConfig reconfigures this node to use the provided configuration. It @@ -1642,28 +1913,30 @@ func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState { // requirements. // // The inputs usually result from restoring a ConfState or applying a ConfChange. -func (r *raft) switchToConfig(cfg tracker.Config, prs tracker.ProgressMap) pb.ConfState { - r.prs.Config = cfg - r.prs.Progress = prs +func (r *raft) switchToConfig(cfg tracker.Config, trk tracker.ProgressMap) pb.ConfState { + r.trk.Config = cfg + r.trk.Progress = trk - r.logger.Infof("%x switched to configuration %s", r.id, r.prs.Config) - cs := r.prs.ConfState() - pr, ok := r.prs.Progress[r.id] + r.logger.Infof("%x switched to configuration %s", r.id, r.trk.Config) + cs := r.trk.ConfState() + pr, ok := r.trk.Progress[r.id] // Update whether the node itself is a learner, resetting to false when the // node is removed. r.isLearner = ok && pr.IsLearner if (!ok || r.isLearner) && r.state == StateLeader { - // This node is leader and was removed or demoted. We prevent demotions - // at the time writing but hypothetically we handle them the same way as - // removing the leader: stepping down into the next Term. + // This node is leader and was removed or demoted, step down if requested. // - // TODO(tbg): step down (for sanity) and ask follower with largest Match - // to TimeoutNow (to avoid interruption). This might still drop some - // proposals but it's better than nothing. + // We prevent demotions at the time writing but hypothetically we handle + // them the same way as removing the leader. // - // TODO(tbg): test this branch. It is untested at the time of writing. + // TODO(tbg): ask follower with largest Match to TimeoutNow (to avoid + // interruption). 
This might still drop some proposals but it's better than + // nothing. + if r.stepDownOnRemoval { + r.becomeFollower(r.Term, None) + } return cs } @@ -1681,12 +1954,15 @@ func (r *raft) switchToConfig(cfg tracker.Config, prs tracker.ProgressMap) pb.Co // Otherwise, still probe the newly added replicas; there's no reason to // let them wait out a heartbeat interval (or the next incoming // proposal). - r.prs.Visit(func(id uint64, pr *tracker.Progress) { + r.trk.Visit(func(id uint64, pr *tracker.Progress) { + if id == r.id { + return + } r.maybeSendAppend(id, false /* sendIfEmpty */) }) } - // If the the leadTransferee was removed or demoted, abort the leadership transfer. - if _, tOK := r.prs.Config.Voters.IDs()[r.leadTransferee]; !tOK && r.leadTransferee != 0 { + // If the leadTransferee was removed or demoted, abort the leadership transfer. + if _, tOK := r.trk.Config.Voters.IDs()[r.leadTransferee]; !tOK && r.leadTransferee != 0 { r.abortLeaderTransfer() } @@ -1702,7 +1978,7 @@ func (r *raft) loadState(state pb.HardState) { r.Vote = state.Vote } -// pastElectionTimeout returns true iff r.electionElapsed is greater +// pastElectionTimeout returns true if r.electionElapsed is greater // than or equal to the randomized election timeout in // [electiontimeout, 2 * electiontimeout - 1]. func (r *raft) pastElectionTimeout() bool { @@ -1723,7 +1999,9 @@ func (r *raft) abortLeaderTransfer() { // committedEntryInCurrentTerm return true if the peer has committed an entry in its term. func (r *raft) committedEntryInCurrentTerm() bool { - return r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) == r.Term + // NB: r.Term is never 0 on a leader, so if zeroTermOnOutOfBounds returns 0, + // we won't see it as a match with r.Term. + return r.raftLog.zeroTermOnOutOfBounds(r.raftLog.term(r.raftLog.committed)) == r.Term } // responseToReadIndexReq constructs a response for `req`. If `req` comes from the peer @@ -1753,11 +2031,7 @@ func (r *raft) responseToReadIndexReq(req pb.Message, readIndex uint64) pb.Messa // Empty payloads are never refused. This is used both for appending an empty // entry at a new leader's term, as well as leaving a joint configuration. func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool { - var s uint64 - for _, e := range ents { - s += uint64(PayloadSize(e)) - } - + s := payloadsSize(ents) if r.uncommittedSize > 0 && s > 0 && r.uncommittedSize+s > r.maxUncommittedSize { // If the uncommitted tail of the Raft log is empty, allow any size // proposal. Otherwise, limit the size of the uncommitted tail of the @@ -1774,16 +2048,7 @@ func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool { // reduceUncommittedSize accounts for the newly committed entries by decreasing // the uncommitted entry size limit. -func (r *raft) reduceUncommittedSize(ents []pb.Entry) { - if r.uncommittedSize == 0 { - // Fast-path for followers, who do not track or enforce the limit. - return - } - - var s uint64 - for _, e := range ents { - s += uint64(PayloadSize(e)) - } +func (r *raft) reduceUncommittedSize(s entryPayloadSize) { if s > r.uncommittedSize { // uncommittedSize may underestimate the size of the uncommitted Raft // log tail but will never overestimate it. 
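The uncommitted-size accounting above (increaseUncommittedSize and reduceUncommittedSize) boils down to two rules: an oversized proposal is admitted only when the uncommitted tail is empty, and reduction saturates at zero because the tracked size may underestimate but never overestimate the tail. A standalone sketch with invented names, not the library's types:

```go
package main

import "fmt"

type uncommittedTracker struct {
	size, max uint64
}

// increase refuses a payload of size s that would exceed the limit, unless
// the uncommitted tail is currently empty (an empty tail admits any size).
func (t *uncommittedTracker) increase(s uint64) bool {
	if t.size > 0 && s > 0 && t.size+s > t.max {
		return false
	}
	t.size += s
	return true
}

// reduce decreases the tracked size, saturating at zero rather than
// underflowing the unsigned counter.
func (t *uncommittedTracker) reduce(s uint64) {
	if s > t.size {
		s = t.size
	}
	t.size -= s
}

func main() {
	t := uncommittedTracker{max: 10}
	fmt.Println(t.increase(25)) // true: empty tail admits any size
	fmt.Println(t.increase(1))  // false: 25+1 exceeds max with a non-empty tail
	t.reduce(30)                // saturates at 0
	fmt.Println(t.size)         // 0
}
```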
Saturate at 0 instead of @@ -1794,17 +2059,12 @@ func (r *raft) reduceUncommittedSize(ents []pb.Entry) { } } -func numOfPendingConf(ents []pb.Entry) int { - n := 0 - for i := range ents { - if ents[i].Type == pb.EntryConfChange || ents[i].Type == pb.EntryConfChangeV2 { - n++ - } - } - return n -} - func releasePendingReadIndexMessages(r *raft) { + if len(r.pendingReadIndexMessages) == 0 { + // Fast path for the common case to avoid a call to storage.LastIndex() + // via committedEntryInCurrentTerm. + return + } if !r.committedEntryInCurrentTerm() { r.logger.Error("pending MsgReadIndex should be released only after first commit in current term") return diff --git a/vendor/go.etcd.io/etcd/raft/v3/raftpb/confchange.go b/vendor/go.etcd.io/raft/v3/raftpb/confchange.go similarity index 95% rename from vendor/go.etcd.io/etcd/raft/v3/raftpb/confchange.go rename to vendor/go.etcd.io/raft/v3/raftpb/confchange.go index 47fae65dfe1b9..a3ddff62fd0df 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/raftpb/confchange.go +++ b/vendor/go.etcd.io/raft/v3/raftpb/confchange.go @@ -35,7 +35,13 @@ func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) { var typ EntryType var ccdata []byte var err error - if ccv1, ok := c.AsV1(); ok { + if c == nil { + // A nil data unmarshals into an empty ConfChangeV2 and has the benefit + // that appendEntry can never refuse it based on its size (which + // registers as zero). + typ = EntryConfChangeV2 + ccdata = nil + } else if ccv1, ok := c.AsV1(); ok { typ = EntryConfChange ccdata, err = ccv1.Marshal() } else { diff --git a/vendor/go.etcd.io/etcd/raft/v3/raftpb/confstate.go b/vendor/go.etcd.io/raft/v3/raftpb/confstate.go similarity index 100% rename from vendor/go.etcd.io/etcd/raft/v3/raftpb/confstate.go rename to vendor/go.etcd.io/raft/v3/raftpb/confstate.go diff --git a/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.pb.go b/vendor/go.etcd.io/raft/v3/raftpb/raft.pb.go similarity index 84% rename from vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.pb.go rename to vendor/go.etcd.io/raft/v3/raftpb/raft.pb.go index 1ee77a9a45766..7dcdef0cd6ef8 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.pb.go +++ b/vendor/go.etcd.io/raft/v3/raftpb/raft.pb.go @@ -68,29 +68,34 @@ func (EntryType) EnumDescriptor() ([]byte, []int) { } // For description of different message types, see: -// https://pkg.go.dev/go.etcd.io/etcd/raft/v3#hdr-MessageType +// https://pkg.go.dev/go.etcd.io/raft/v3#hdr-MessageType type MessageType int32 const ( - MsgHup MessageType = 0 - MsgBeat MessageType = 1 - MsgProp MessageType = 2 - MsgApp MessageType = 3 - MsgAppResp MessageType = 4 - MsgVote MessageType = 5 - MsgVoteResp MessageType = 6 - MsgSnap MessageType = 7 - MsgHeartbeat MessageType = 8 - MsgHeartbeatResp MessageType = 9 - MsgUnreachable MessageType = 10 - MsgSnapStatus MessageType = 11 - MsgCheckQuorum MessageType = 12 - MsgTransferLeader MessageType = 13 - MsgTimeoutNow MessageType = 14 - MsgReadIndex MessageType = 15 - MsgReadIndexResp MessageType = 16 - MsgPreVote MessageType = 17 - MsgPreVoteResp MessageType = 18 + MsgHup MessageType = 0 + MsgBeat MessageType = 1 + MsgProp MessageType = 2 + MsgApp MessageType = 3 + MsgAppResp MessageType = 4 + MsgVote MessageType = 5 + MsgVoteResp MessageType = 6 + MsgSnap MessageType = 7 + MsgHeartbeat MessageType = 8 + MsgHeartbeatResp MessageType = 9 + MsgUnreachable MessageType = 10 + MsgSnapStatus MessageType = 11 + MsgCheckQuorum MessageType = 12 + MsgTransferLeader MessageType = 13 + MsgTimeoutNow MessageType = 14 + MsgReadIndex MessageType = 15 + 
MsgReadIndexResp MessageType = 16 + MsgPreVote MessageType = 17 + MsgPreVoteResp MessageType = 18 + MsgStorageAppend MessageType = 19 + MsgStorageAppendResp MessageType = 20 + MsgStorageApply MessageType = 21 + MsgStorageApplyResp MessageType = 22 + MsgForgetLeader MessageType = 23 ) var MessageType_name = map[int32]string{ @@ -113,28 +118,38 @@ var MessageType_name = map[int32]string{ 16: "MsgReadIndexResp", 17: "MsgPreVote", 18: "MsgPreVoteResp", + 19: "MsgStorageAppend", + 20: "MsgStorageAppendResp", + 21: "MsgStorageApply", + 22: "MsgStorageApplyResp", + 23: "MsgForgetLeader", } var MessageType_value = map[string]int32{ - "MsgHup": 0, - "MsgBeat": 1, - "MsgProp": 2, - "MsgApp": 3, - "MsgAppResp": 4, - "MsgVote": 5, - "MsgVoteResp": 6, - "MsgSnap": 7, - "MsgHeartbeat": 8, - "MsgHeartbeatResp": 9, - "MsgUnreachable": 10, - "MsgSnapStatus": 11, - "MsgCheckQuorum": 12, - "MsgTransferLeader": 13, - "MsgTimeoutNow": 14, - "MsgReadIndex": 15, - "MsgReadIndexResp": 16, - "MsgPreVote": 17, - "MsgPreVoteResp": 18, + "MsgHup": 0, + "MsgBeat": 1, + "MsgProp": 2, + "MsgApp": 3, + "MsgAppResp": 4, + "MsgVote": 5, + "MsgVoteResp": 6, + "MsgSnap": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResp": 9, + "MsgUnreachable": 10, + "MsgSnapStatus": 11, + "MsgCheckQuorum": 12, + "MsgTransferLeader": 13, + "MsgTimeoutNow": 14, + "MsgReadIndex": 15, + "MsgReadIndexResp": 16, + "MsgPreVote": 17, + "MsgPreVoteResp": 18, + "MsgStorageAppend": 19, + "MsgStorageAppendResp": 20, + "MsgStorageApply": 21, + "MsgStorageApplyResp": 22, + "MsgForgetLeader": 23, } func (x MessageType) Enum() *MessageType { @@ -386,18 +401,37 @@ type Message struct { From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` // logTerm is generally used for appending Raft logs to followers. For example, - // (type=MsgApp,index=100,logTerm=5) means leader appends entries starting at - // index=101, and the term of entry at index 100 is 5. + // (type=MsgApp,index=100,logTerm=5) means the leader appends entries starting + // at index=101, and the term of the entry at index 100 is 5. // (type=MsgAppResp,reject=true,index=100,logTerm=5) means follower rejects some // entries from its leader as it already has an entry with term 5 at index 100. - LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` - Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` - Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` - Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` - Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` - Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` - RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` - Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + // (type=MsgStorageAppendResp,index=100,logTerm=5) means the local node wrote + // entries up to index=100 in stable storage, and the term of the entry at index + // 100 was 5. This doesn't always mean that the corresponding MsgStorageAppend + // message was the one that carried these entries, just that those entries were + // stable at the time of processing the corresponding MsgStorageAppend. 
+ LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + // (type=MsgStorageAppend,vote=5,term=10) means the local node is voting for + // peer 5 in term 10. For MsgStorageAppends, the term, vote, and commit fields + // will either all be set (to facilitate the construction of a HardState) if + // any of the fields have changed or will all be unset if none of the fields + // have changed. + Vote uint64 `protobuf:"varint,13,opt,name=vote" json:"vote"` + // snapshot is non-nil and non-empty for MsgSnap messages and nil for all other + // message types. However, peer nodes running older binary versions may send a + // non-nil, empty value for the snapshot field of non-MsgSnap messages. Code + // should be prepared to handle such messages. + Snapshot *Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot,omitempty"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + // responses are populated by a raft node to instruct storage threads on how + // to respond and who to respond to when the work associated with a message + // is complete. Populated for MsgStorageAppend and MsgStorageApply messages. + Responses []Message `protobuf:"bytes,14,rep,name=responses" json:"responses"` } func (m *Message) Reset() { *m = Message{} } @@ -695,72 +729,76 @@ func init() { func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) } var fileDescriptor_b042552c306ae59b = []byte{ - // 1026 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcd, 0x6e, 0xdb, 0x46, - 0x17, 0x25, 0x29, 0x5a, 0x3f, 0x57, 0xb2, 0x3c, 0xbe, 0xf1, 0x17, 0x10, 0x86, 0xc1, 0xe8, 0x53, - 0x52, 0x44, 0x70, 0x11, 0xb7, 0xd0, 0xa2, 0x28, 0xba, 0xf3, 0x4f, 0x00, 0xab, 0xb0, 0xdc, 0x54, - 0x76, 0xbc, 0x28, 0x50, 0x08, 0x63, 0x71, 0x44, 0xb3, 0x15, 0x39, 0x04, 0x39, 0x72, 0xed, 0x4d, - 0x51, 0xf4, 0x09, 0xba, 0xec, 0x26, 0xdb, 0x3e, 0x40, 0x9f, 0xc2, 0x4b, 0x03, 0xdd, 0x74, 0x15, - 0x34, 0xf6, 0x8b, 0x14, 0x33, 0x1c, 0x4a, 0x94, 0x6c, 0x64, 0xd1, 0xdd, 0xcc, 0xb9, 0x67, 0xee, - 0x9c, 0x73, 0xef, 0xe5, 0x10, 0x20, 0xa1, 0x63, 0xb1, 0x13, 0x27, 0x5c, 0x70, 0x2c, 0xcb, 0x75, - 0x7c, 0xbe, 0xb9, 0xe1, 0x73, 0x9f, 0x2b, 0xe8, 0x33, 0xb9, 0xca, 0xa2, 0xed, 0x9f, 0x61, 0xe5, - 0x75, 0x24, 0x92, 0x6b, 0x74, 0xc0, 0x3e, 0x65, 0x49, 0xe8, 0x58, 0x2d, 0xb3, 0x63, 0xef, 0xd9, - 0x37, 0xef, 0x9f, 0x19, 0x03, 0x85, 0xe0, 0x26, 0xac, 0xf4, 0x22, 0x8f, 0x5d, 0x39, 0xa5, 0x42, - 0x28, 0x83, 0xf0, 0x53, 0xb0, 0x4f, 0xaf, 0x63, 0xe6, 0x98, 0x2d, 0xb3, 0xd3, 0xec, 0xae, 0xef, - 0x64, 0x77, 0xed, 0xa8, 0x94, 0x32, 0x30, 0x4b, 0x74, 0x1d, 0x33, 0x44, 0xb0, 0x0f, 0xa8, 0xa0, - 0x8e, 0xdd, 0x32, 0x3b, 0x8d, 0x81, 0x5a, 0xb7, 0x7f, 0x31, 0x81, 0x9c, 0x44, 0x34, 0x4e, 0x2f, - 0xb8, 0xe8, 0x33, 0x41, 0x3d, 0x2a, 0x28, 0x7e, 0x01, 0x30, 0xe2, 0xd1, 0x78, 0x98, 0x0a, 0x2a, - 0xb2, 0xdc, 0xf5, 0x79, 0xee, 0x7d, 0x1e, 0x8d, 0x4f, 0x64, 0x40, 0xe7, 0xae, 0x8d, 0x72, 0x40, - 0x2a, 0x0d, 0x94, 0xd2, 0xa2, 0x89, 0x0c, 0x92, 0xfe, 0x84, 0xf4, 0x57, 0x34, 0xa1, 0x90, 0xf6, - 0x77, 0x50, 0xcd, 0x15, 0x48, 0x89, 0x52, 0x81, 0xba, 0xb3, 0x31, 0x50, 0x6b, 0xfc, 0x0a, 0xaa, - 0xa1, 0x56, 0xa6, 0x12, 0xd7, 
0xbb, 0x4e, 0xae, 0x65, 0x59, 0xb9, 0xce, 0x3b, 0xe3, 0xb7, 0xdf, - 0x95, 0xa0, 0xd2, 0x67, 0x69, 0x4a, 0x7d, 0x86, 0xaf, 0xc0, 0x16, 0xf3, 0x5a, 0x3d, 0xc9, 0x73, - 0xe8, 0x70, 0xb1, 0x5a, 0x92, 0x86, 0x1b, 0x60, 0x09, 0xbe, 0xe0, 0xc4, 0x12, 0x5c, 0xda, 0x18, - 0x27, 0x7c, 0xc9, 0x86, 0x44, 0x66, 0x06, 0xed, 0x65, 0x83, 0xe8, 0x42, 0x65, 0xc2, 0x7d, 0xd5, - 0xdd, 0x95, 0x42, 0x30, 0x07, 0xe7, 0x65, 0x2b, 0x3f, 0x2c, 0xdb, 0x2b, 0xa8, 0xb0, 0x48, 0x24, - 0x01, 0x4b, 0x9d, 0x4a, 0xab, 0xd4, 0xa9, 0x77, 0x57, 0x17, 0x7a, 0x9c, 0xa7, 0xd2, 0x1c, 0xdc, - 0x82, 0xf2, 0x88, 0x87, 0x61, 0x20, 0x9c, 0x6a, 0x21, 0x97, 0xc6, 0xb0, 0x0b, 0xd5, 0x54, 0x57, - 0xcc, 0xa9, 0xa9, 0x4a, 0x92, 0xe5, 0x4a, 0xe6, 0x15, 0xcc, 0x79, 0x32, 0x63, 0xc2, 0x7e, 0x60, - 0x23, 0xe1, 0x40, 0xcb, 0xec, 0x54, 0xf3, 0x8c, 0x19, 0x86, 0x2f, 0x00, 0xb2, 0xd5, 0x61, 0x10, - 0x09, 0xa7, 0x5e, 0xb8, 0xb3, 0x80, 0xa3, 0x03, 0x95, 0x11, 0x8f, 0x04, 0xbb, 0x12, 0x4e, 0x43, - 0x35, 0x36, 0xdf, 0xb6, 0xbf, 0x87, 0xda, 0x21, 0x4d, 0xbc, 0x6c, 0x7c, 0xf2, 0x0a, 0x9a, 0x0f, - 0x2a, 0xe8, 0x80, 0x7d, 0xc9, 0x05, 0x5b, 0xfc, 0x38, 0x24, 0x52, 0x30, 0x5c, 0x7a, 0x68, 0xb8, - 0xfd, 0xa7, 0x09, 0xb5, 0xd9, 0xbc, 0xe2, 0x53, 0x28, 0xcb, 0x33, 0x49, 0xea, 0x98, 0xad, 0x52, - 0xc7, 0x1e, 0xe8, 0x1d, 0x6e, 0x42, 0x75, 0xc2, 0x68, 0x12, 0xc9, 0x88, 0xa5, 0x22, 0xb3, 0x3d, - 0xbe, 0x84, 0xb5, 0x8c, 0x35, 0xe4, 0x53, 0xe1, 0xf3, 0x20, 0xf2, 0x9d, 0x92, 0xa2, 0x34, 0x33, - 0xf8, 0x1b, 0x8d, 0xe2, 0x73, 0x58, 0xcd, 0x0f, 0x0d, 0x23, 0xe9, 0xd4, 0x56, 0xb4, 0x46, 0x0e, - 0x1e, 0xb3, 0x2b, 0x81, 0xcf, 0x01, 0xe8, 0x54, 0xf0, 0xe1, 0x84, 0xd1, 0x4b, 0xa6, 0x86, 0x21, - 0x2f, 0x68, 0x4d, 0xe2, 0x47, 0x12, 0x6e, 0xbf, 0x33, 0x01, 0xa4, 0xe8, 0xfd, 0x0b, 0x1a, 0xf9, - 0x0c, 0x3f, 0xd7, 0x63, 0x6b, 0xa9, 0xb1, 0x7d, 0x5a, 0xfc, 0x0c, 0x33, 0xc6, 0x83, 0xc9, 0x7d, - 0x09, 0x95, 0x88, 0x7b, 0x6c, 0x18, 0x78, 0xba, 0x28, 0x4d, 0x19, 0xbc, 0x7b, 0xff, 0xac, 0x7c, - 0xcc, 0x3d, 0xd6, 0x3b, 0x18, 0x94, 0x65, 0xb8, 0xe7, 0x15, 0xfb, 0x62, 0x2f, 0xf4, 0x05, 0x37, - 0xc1, 0x0a, 0x3c, 0xdd, 0x08, 0xd0, 0xa7, 0xad, 0xde, 0xc1, 0xc0, 0x0a, 0xbc, 0x76, 0x08, 0x64, - 0x7e, 0xf9, 0x49, 0x10, 0xf9, 0x93, 0xb9, 0x48, 0xf3, 0xbf, 0x88, 0xb4, 0x3e, 0x26, 0xb2, 0xfd, - 0x87, 0x09, 0x8d, 0x79, 0x9e, 0xb3, 0x2e, 0xee, 0x01, 0x88, 0x84, 0x46, 0x69, 0x20, 0x02, 0x1e, - 0xe9, 0x1b, 0xb7, 0x1e, 0xb9, 0x71, 0xc6, 0xc9, 0x27, 0x72, 0x7e, 0x0a, 0xbf, 0x84, 0xca, 0x48, - 0xb1, 0xb2, 0x8e, 0x17, 0x9e, 0x94, 0x65, 0x6b, 0xf9, 0x17, 0xa6, 0xe9, 0xc5, 0x9a, 0x95, 0x16, - 0x6a, 0xb6, 0x7d, 0x08, 0xb5, 0xd9, 0xbb, 0x8b, 0x6b, 0x50, 0x57, 0x9b, 0x63, 0x9e, 0x84, 0x74, - 0x42, 0x0c, 0x7c, 0x02, 0x6b, 0x0a, 0x98, 0xe7, 0x27, 0x26, 0xfe, 0x0f, 0xd6, 0x97, 0xc0, 0xb3, - 0x2e, 0xb1, 0xb6, 0xff, 0xb2, 0xa0, 0x5e, 0x78, 0x96, 0x10, 0xa0, 0xdc, 0x4f, 0xfd, 0xc3, 0x69, - 0x4c, 0x0c, 0xac, 0x43, 0xa5, 0x9f, 0xfa, 0x7b, 0x8c, 0x0a, 0x62, 0xea, 0xcd, 0x9b, 0x84, 0xc7, - 0xc4, 0xd2, 0xac, 0xdd, 0x38, 0x26, 0x25, 0x6c, 0x02, 0x64, 0xeb, 0x01, 0x4b, 0x63, 0x62, 0x6b, - 0xe2, 0x19, 0x17, 0x8c, 0xac, 0x48, 0x6d, 0x7a, 0xa3, 0xa2, 0x65, 0x1d, 0x95, 0x4f, 0x00, 0xa9, - 0x20, 0x81, 0x86, 0xbc, 0x8c, 0xd1, 0x44, 0x9c, 0xcb, 0x5b, 0xaa, 0xb8, 0x01, 0xa4, 0x88, 0xa8, - 0x43, 0x35, 0x44, 0x68, 0xf6, 0x53, 0xff, 0x6d, 0x94, 0x30, 0x3a, 0xba, 0xa0, 0xe7, 0x13, 0x46, - 0x00, 0xd7, 0x61, 0x55, 0x27, 0x92, 0x5f, 0xdc, 0x34, 0x25, 0x75, 0x4d, 0xdb, 0xbf, 0x60, 0xa3, - 0x1f, 0xbf, 0x9d, 0xf2, 0x64, 0x1a, 0x92, 0x86, 0xb4, 0xdd, 0x4f, 0x7d, 0xd5, 0xa0, 0x31, 0x4b, - 0x8e, 0x18, 0xf5, 0x58, 0x42, 0x56, 0xf5, 0xe9, 0xd3, 
0x20, 0x64, 0x7c, 0x2a, 0x8e, 0xf9, 0x4f, - 0xa4, 0xa9, 0xc5, 0x0c, 0x18, 0xf5, 0xd4, 0xff, 0x8e, 0xac, 0x69, 0x31, 0x33, 0x44, 0x89, 0x21, - 0xda, 0xef, 0x9b, 0x84, 0x29, 0x8b, 0xeb, 0xfa, 0x56, 0xbd, 0x57, 0x1c, 0xdc, 0xfe, 0xd5, 0x84, - 0x8d, 0xc7, 0xc6, 0x03, 0xb7, 0xc0, 0x79, 0x0c, 0xdf, 0x9d, 0x0a, 0x4e, 0x0c, 0xfc, 0x04, 0xfe, - 0xff, 0x58, 0xf4, 0x6b, 0x1e, 0x44, 0xa2, 0x17, 0xc6, 0x93, 0x60, 0x14, 0xc8, 0x56, 0x7c, 0x8c, - 0xf6, 0xfa, 0x4a, 0xd3, 0xac, 0xed, 0x6b, 0x68, 0x2e, 0x7e, 0x14, 0xb2, 0x18, 0x73, 0x64, 0xd7, - 0xf3, 0xe4, 0xf8, 0x13, 0x03, 0x9d, 0xa2, 0xd8, 0x01, 0x0b, 0xf9, 0x25, 0x53, 0x11, 0x73, 0x31, - 0xf2, 0x36, 0xf6, 0xa8, 0xc8, 0x22, 0xd6, 0xa2, 0x91, 0x5d, 0xcf, 0x3b, 0xca, 0xde, 0x1e, 0x15, - 0x2d, 0xed, 0xbd, 0xb8, 0xf9, 0xe0, 0x1a, 0xb7, 0x1f, 0x5c, 0xe3, 0xe6, 0xce, 0x35, 0x6f, 0xef, - 0x5c, 0xf3, 0x9f, 0x3b, 0xd7, 0xfc, 0xed, 0xde, 0x35, 0x7e, 0xbf, 0x77, 0x8d, 0xdb, 0x7b, 0xd7, - 0xf8, 0xfb, 0xde, 0x35, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xee, 0xe3, 0x39, 0x8b, 0xbb, 0x08, - 0x00, 0x00, + // 1102 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcb, 0x6e, 0x23, 0x45, + 0x14, 0xed, 0x6e, 0x77, 0xfc, 0xb8, 0x76, 0x9c, 0x4a, 0xc5, 0x33, 0xd3, 0x8a, 0x22, 0x8f, 0xf1, + 0x0c, 0x1a, 0x2b, 0x68, 0x02, 0x32, 0x12, 0x42, 0xec, 0xf2, 0x18, 0x94, 0xa0, 0x38, 0x0c, 0x4e, + 0x26, 0x0b, 0x24, 0x14, 0x55, 0xdc, 0x95, 0x4e, 0x83, 0x5d, 0xd5, 0xaa, 0x2e, 0x87, 0x64, 0x83, + 0x10, 0x5f, 0xc0, 0x92, 0x0d, 0x5b, 0x3e, 0x80, 0x8f, 0x40, 0x59, 0x66, 0xc9, 0x6a, 0xc4, 0x24, + 0x7f, 0xc0, 0x17, 0xa0, 0xaa, 0xae, 0x7e, 0xd8, 0x89, 0x66, 0xc1, 0xae, 0xea, 0xdc, 0x53, 0xf7, + 0x9e, 0x7b, 0x6e, 0x57, 0x35, 0x80, 0x20, 0x67, 0x72, 0x23, 0x12, 0x5c, 0x72, 0x5c, 0x56, 0xeb, + 0xe8, 0x74, 0xb5, 0x15, 0xf0, 0x80, 0x6b, 0xe8, 0x63, 0xb5, 0x4a, 0xa2, 0xdd, 0x9f, 0x60, 0xe1, + 0x15, 0x93, 0xe2, 0x0a, 0x7b, 0xe0, 0x1e, 0x51, 0x31, 0xf1, 0x9c, 0x8e, 0xdd, 0x73, 0xb7, 0xdc, + 0xeb, 0xb7, 0x4f, 0xad, 0xa1, 0x46, 0xf0, 0x2a, 0x2c, 0xec, 0x31, 0x9f, 0x5e, 0x7a, 0xa5, 0x42, + 0x28, 0x81, 0xf0, 0x47, 0xe0, 0x1e, 0x5d, 0x45, 0xd4, 0xb3, 0x3b, 0x76, 0xaf, 0xd9, 0x5f, 0xde, + 0x48, 0x6a, 0x6d, 0xe8, 0x94, 0x2a, 0x90, 0x25, 0xba, 0x8a, 0x28, 0xc6, 0xe0, 0xee, 0x10, 0x49, + 0x3c, 0xb7, 0x63, 0xf7, 0x1a, 0x43, 0xbd, 0xee, 0xfe, 0x6c, 0x03, 0x3a, 0x64, 0x24, 0x8a, 0xcf, + 0xb9, 0x1c, 0x50, 0x49, 0x7c, 0x22, 0x09, 0xfe, 0x0c, 0x60, 0xc4, 0xd9, 0xd9, 0x49, 0x2c, 0x89, + 0x4c, 0x72, 0xd7, 0xf3, 0xdc, 0xdb, 0x9c, 0x9d, 0x1d, 0xaa, 0x80, 0xc9, 0x5d, 0x1b, 0xa5, 0x80, + 0x52, 0x1a, 0x6a, 0xa5, 0xc5, 0x26, 0x12, 0x48, 0xf5, 0x27, 0x55, 0x7f, 0xc5, 0x26, 0x34, 0xd2, + 0xfd, 0x16, 0xaa, 0xa9, 0x02, 0x25, 0x51, 0x29, 0xd0, 0x35, 0x1b, 0x43, 0xbd, 0xc6, 0x5f, 0x40, + 0x75, 0x62, 0x94, 0xe9, 0xc4, 0xf5, 0xbe, 0x97, 0x6a, 0x99, 0x57, 0x6e, 0xf2, 0x66, 0xfc, 0xee, + 0xbf, 0x25, 0xa8, 0x0c, 0x68, 0x1c, 0x93, 0x80, 0xe2, 0x97, 0xe0, 0xca, 0xdc, 0xab, 0x95, 0x34, + 0x87, 0x09, 0x17, 0xdd, 0x52, 0x34, 0xdc, 0x02, 0x47, 0xf2, 0x99, 0x4e, 0x1c, 0xc9, 0x55, 0x1b, + 0x67, 0x82, 0xcf, 0xb5, 0xa1, 0x90, 0xac, 0x41, 0x77, 0xbe, 0x41, 0xdc, 0x86, 0xca, 0x98, 0x07, + 0x7a, 0xba, 0x0b, 0x85, 0x60, 0x0a, 0xe6, 0xb6, 0x95, 0xef, 0xdb, 0xf6, 0x12, 0x2a, 0x94, 0x49, + 0x11, 0xd2, 0xd8, 0xab, 0x74, 0x4a, 0xbd, 0x7a, 0x7f, 0x71, 0x66, 0xc6, 0x69, 0x2a, 0xc3, 0xc1, + 0x6b, 0x50, 0x1e, 0xf1, 0xc9, 0x24, 0x94, 0x5e, 0xb5, 0x90, 0xcb, 0x60, 0x4a, 0xe2, 0x05, 0x97, + 0xd4, 0x5b, 0x2c, 0x4a, 0x54, 0x08, 0xee, 0x43, 0x35, 0x36, 0x5e, 0x7a, 0x35, 0xed, 0x31, 0x9a, + 0xf7, 0x58, 0xf3, 
0xed, 0x61, 0xc6, 0x53, 0xb5, 0x04, 0xfd, 0x9e, 0x8e, 0xa4, 0x07, 0x1d, 0xbb, + 0x57, 0x4d, 0x6b, 0x25, 0x18, 0x7e, 0x0e, 0x90, 0xac, 0x76, 0x43, 0x26, 0xbd, 0x7a, 0xa1, 0x62, + 0x01, 0x57, 0xd6, 0x8c, 0x38, 0x93, 0xf4, 0x52, 0x7a, 0x0d, 0x35, 0x72, 0x53, 0x24, 0x05, 0xf1, + 0xa7, 0x50, 0x13, 0x34, 0x8e, 0x38, 0x8b, 0x69, 0xec, 0x35, 0xb5, 0x01, 0x4b, 0x73, 0x83, 0x4b, + 0x3f, 0xc3, 0x8c, 0xd7, 0xfd, 0x0e, 0x6a, 0xbb, 0x44, 0xf8, 0xc9, 0x37, 0x99, 0x8e, 0xc5, 0xbe, + 0x37, 0x96, 0xd4, 0x0d, 0xe7, 0x9e, 0x1b, 0xb9, 0x8b, 0xa5, 0xfb, 0x2e, 0x76, 0xff, 0xb4, 0xa1, + 0x96, 0x5d, 0x02, 0xfc, 0x18, 0xca, 0xea, 0x8c, 0x88, 0x3d, 0xbb, 0x53, 0xea, 0xb9, 0x43, 0xb3, + 0xc3, 0xab, 0x50, 0x1d, 0x53, 0x22, 0x98, 0x8a, 0x38, 0x3a, 0x92, 0xed, 0xf1, 0x0b, 0x58, 0x4a, + 0x58, 0x27, 0x7c, 0x2a, 0x03, 0x1e, 0xb2, 0xc0, 0x2b, 0x69, 0x4a, 0x33, 0x81, 0xbf, 0x36, 0x28, + 0x7e, 0x06, 0x8b, 0xe9, 0xa1, 0x13, 0xa6, 0x4c, 0x72, 0x35, 0xad, 0x91, 0x82, 0x07, 0xca, 0xa3, + 0x67, 0x00, 0x64, 0x2a, 0xf9, 0xc9, 0x98, 0x92, 0x0b, 0xaa, 0xbf, 0xb0, 0x74, 0x16, 0x35, 0x85, + 0xef, 0x2b, 0xb8, 0xfb, 0xbb, 0x0d, 0xa0, 0x44, 0x6f, 0x9f, 0x13, 0x16, 0x50, 0xfc, 0x89, 0xb9, + 0x0b, 0x8e, 0xbe, 0x0b, 0x8f, 0x8b, 0x77, 0x3b, 0x61, 0xdc, 0xbb, 0x0e, 0x2f, 0xa0, 0xc2, 0xb8, + 0x4f, 0x4f, 0x42, 0xdf, 0x98, 0xd2, 0x54, 0xc1, 0xdb, 0xb7, 0x4f, 0xcb, 0x07, 0xdc, 0xa7, 0x7b, + 0x3b, 0xc3, 0xb2, 0x0a, 0xef, 0xf9, 0xd8, 0xcb, 0x47, 0x9a, 0x3c, 0x34, 0xd9, 0x30, 0x57, 0xc1, + 0x09, 0x7d, 0x33, 0x08, 0x30, 0xa7, 0x9d, 0xbd, 0x9d, 0xa1, 0x13, 0xfa, 0xdd, 0x09, 0xa0, 0xbc, + 0xf8, 0x61, 0xc8, 0x82, 0x71, 0x2e, 0xd2, 0xfe, 0x3f, 0x22, 0x9d, 0xf7, 0x89, 0xec, 0xfe, 0x61, + 0x43, 0x23, 0xcf, 0x73, 0xdc, 0xc7, 0x5b, 0x00, 0x52, 0x10, 0x16, 0x87, 0x32, 0xe4, 0xcc, 0x54, + 0x5c, 0x7b, 0xa0, 0x62, 0xc6, 0x49, 0x3f, 0xe6, 0xfc, 0x14, 0xfe, 0x1c, 0x2a, 0x23, 0xcd, 0x4a, + 0x26, 0x5e, 0x78, 0xa7, 0xe6, 0x5b, 0x4b, 0xaf, 0xad, 0xa1, 0x17, 0x3d, 0x2b, 0xcd, 0x78, 0xb6, + 0xbe, 0x0b, 0xb5, 0xec, 0x31, 0xc7, 0x4b, 0x50, 0xd7, 0x9b, 0x03, 0x2e, 0x26, 0x64, 0x8c, 0x2c, + 0xbc, 0x02, 0x4b, 0x1a, 0xc8, 0xf3, 0x23, 0x1b, 0x3f, 0x82, 0xe5, 0x39, 0xf0, 0xb8, 0x8f, 0x9c, + 0xf5, 0xbf, 0x4a, 0x50, 0x2f, 0xbc, 0x75, 0x18, 0xa0, 0x3c, 0x88, 0x83, 0xdd, 0x69, 0x84, 0x2c, + 0x5c, 0x87, 0xca, 0x20, 0x0e, 0xb6, 0x28, 0x91, 0xc8, 0x36, 0x9b, 0xd7, 0x82, 0x47, 0xc8, 0x31, + 0xac, 0xcd, 0x28, 0x42, 0x25, 0xdc, 0x04, 0x48, 0xd6, 0x43, 0x1a, 0x47, 0xc8, 0x35, 0xc4, 0x63, + 0x2e, 0x29, 0x5a, 0x50, 0xda, 0xcc, 0x46, 0x47, 0xcb, 0x26, 0xaa, 0x5e, 0x0f, 0x54, 0xc1, 0x08, + 0x1a, 0xaa, 0x18, 0x25, 0x42, 0x9e, 0xaa, 0x2a, 0x55, 0xdc, 0x02, 0x54, 0x44, 0xf4, 0xa1, 0x1a, + 0xc6, 0xd0, 0x1c, 0xc4, 0xc1, 0x1b, 0x26, 0x28, 0x19, 0x9d, 0x93, 0xd3, 0x31, 0x45, 0x80, 0x97, + 0x61, 0xd1, 0x24, 0x52, 0x37, 0x6e, 0x1a, 0xa3, 0xba, 0xa1, 0x6d, 0x9f, 0xd3, 0xd1, 0x0f, 0xdf, + 0x4c, 0xb9, 0x98, 0x4e, 0x50, 0x43, 0xb5, 0x3d, 0x88, 0x03, 0x3d, 0xa0, 0x33, 0x2a, 0xf6, 0x29, + 0xf1, 0xa9, 0x40, 0x8b, 0xe6, 0xf4, 0x51, 0x38, 0xa1, 0x7c, 0x2a, 0x0f, 0xf8, 0x8f, 0xa8, 0x69, + 0xc4, 0x0c, 0x29, 0xf1, 0xf5, 0x4f, 0x14, 0x2d, 0x19, 0x31, 0x19, 0xa2, 0xc5, 0x20, 0xd3, 0xef, + 0x6b, 0x41, 0x75, 0x8b, 0xcb, 0xa6, 0xaa, 0xd9, 0x6b, 0x0e, 0x36, 0x27, 0x0f, 0x25, 0x17, 0x24, + 0xa0, 0x9b, 0x51, 0x44, 0x99, 0x8f, 0x56, 0xb0, 0x07, 0xad, 0x79, 0x54, 0xf3, 0x5b, 0x6a, 0x62, + 0x33, 0x91, 0xf1, 0x15, 0x7a, 0x84, 0x9f, 0xc0, 0xca, 0x1c, 0xa8, 0xd9, 0x8f, 0x0d, 0xfb, 0x4b, + 0x2e, 0x02, 0x2a, 0x4d, 0x47, 0x4f, 0xd6, 0x7f, 0xb1, 0xa1, 0xf5, 0xd0, 0x17, 0x89, 0xd7, 0xc0, + 0x7b, 0x08, 0xdf, 0x9c, 0x4a, 0x8e, 0x2c, 
0xfc, 0x21, 0x7c, 0xf0, 0x50, 0xf4, 0x2b, 0x1e, 0x32, + 0xb9, 0x37, 0x89, 0xc6, 0xe1, 0x28, 0x54, 0xd3, 0x7f, 0x1f, 0xed, 0xd5, 0xa5, 0xa1, 0x39, 0xeb, + 0x57, 0xd0, 0x9c, 0xbd, 0x87, 0xca, 0xff, 0x1c, 0xd9, 0xf4, 0x7d, 0x75, 0xe3, 0x90, 0xa5, 0xac, + 0xc8, 0xe1, 0x21, 0x9d, 0xf0, 0x0b, 0xaa, 0x23, 0xf6, 0x6c, 0xe4, 0x4d, 0xe4, 0x13, 0x99, 0x44, + 0x9c, 0xd9, 0x46, 0x36, 0x7d, 0x7f, 0x3f, 0x79, 0xee, 0x74, 0xb4, 0xb4, 0xf5, 0xfc, 0xfa, 0x5d, + 0xdb, 0xba, 0x79, 0xd7, 0xb6, 0xae, 0x6f, 0xdb, 0xf6, 0xcd, 0x6d, 0xdb, 0xfe, 0xe7, 0xb6, 0x6d, + 0xff, 0x7a, 0xd7, 0xb6, 0x7e, 0xbb, 0x6b, 0x5b, 0x37, 0x77, 0x6d, 0xeb, 0xef, 0xbb, 0xb6, 0xf5, + 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0x2b, 0x47, 0x0c, 0x83, 0x09, 0x00, 0x00, } func (m *Entry) Marshal() (dAtA []byte, err error) { @@ -901,6 +939,23 @@ func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Responses) > 0 { + for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaft(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + i-- + dAtA[i] = 0x68 if m.Context != nil { i -= len(m.Context) copy(dAtA[i:], m.Context) @@ -919,16 +974,18 @@ func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x50 - { - size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Snapshot != nil { + { + size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRaft(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintRaft(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a } - i-- - dAtA[i] = 0x4a i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) i-- dAtA[i] = 0x40 @@ -1247,14 +1304,23 @@ func (m *Message) Size() (n int) { } } n += 1 + sovRaft(uint64(m.Commit)) - l = m.Snapshot.Size() - n += 1 + l + sovRaft(uint64(l)) + if m.Snapshot != nil { + l = m.Snapshot.Size() + n += 1 + l + sovRaft(uint64(l)) + } n += 2 n += 1 + sovRaft(uint64(m.RejectHint)) if m.Context != nil { l = len(m.Context) n += 1 + l + sovRaft(uint64(l)) } + n += 1 + sovRaft(uint64(m.Vote)) + if len(m.Responses) > 0 { + for _, e := range m.Responses { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } return n } @@ -1957,6 +2023,9 @@ func (m *Message) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + if m.Snapshot == nil { + m.Snapshot = &Snapshot{} + } if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2034,6 +2103,59 @@ func (m *Message) Unmarshal(dAtA []byte) error { m.Context = []byte{} } iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRaft + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Responses = append(m.Responses, Message{}) + if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) diff --git a/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.proto b/vendor/go.etcd.io/raft/v3/raftpb/raft.proto similarity index 73% rename from vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.proto rename to vendor/go.etcd.io/raft/v3/raftpb/raft.proto index f46a54c948b12..a8598ee5c9d6f 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.proto +++ b/vendor/go.etcd.io/raft/v3/raftpb/raft.proto @@ -37,27 +37,35 @@ message Snapshot { } // For description of different message types, see: -// https://pkg.go.dev/go.etcd.io/etcd/raft/v3#hdr-MessageType +// https://pkg.go.dev/go.etcd.io/raft/v3#hdr-MessageType enum MessageType { - MsgHup = 0; - MsgBeat = 1; - MsgProp = 2; - MsgApp = 3; - MsgAppResp = 4; - MsgVote = 5; - MsgVoteResp = 6; - MsgSnap = 7; - MsgHeartbeat = 8; - MsgHeartbeatResp = 9; - MsgUnreachable = 10; - MsgSnapStatus = 11; - MsgCheckQuorum = 12; - MsgTransferLeader = 13; - MsgTimeoutNow = 14; - MsgReadIndex = 15; - MsgReadIndexResp = 16; - MsgPreVote = 17; - MsgPreVoteResp = 18; + MsgHup = 0; + MsgBeat = 1; + MsgProp = 2; + MsgApp = 3; + MsgAppResp = 4; + MsgVote = 5; + MsgVoteResp = 6; + MsgSnap = 7; + MsgHeartbeat = 8; + MsgHeartbeatResp = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgPreVote = 17; + MsgPreVoteResp = 18; + MsgStorageAppend = 19; + MsgStorageAppendResp = 20; + MsgStorageApply = 21; + MsgStorageApplyResp = 22; + MsgForgetLeader = 23; + // NOTE: when adding new message types, remember to update the isLocalMsg and + // isResponseMsg arrays in raft/util.go and update the corresponding tests in + // raft/util_test.go. } message Message { @@ -66,18 +74,37 @@ message Message { optional uint64 from = 3 [(gogoproto.nullable) = false]; optional uint64 term = 4 [(gogoproto.nullable) = false]; // logTerm is generally used for appending Raft logs to followers. For example, - // (type=MsgApp,index=100,logTerm=5) means leader appends entries starting at - // index=101, and the term of entry at index 100 is 5. + // (type=MsgApp,index=100,logTerm=5) means the leader appends entries starting + // at index=101, and the term of the entry at index 100 is 5. // (type=MsgAppResp,reject=true,index=100,logTerm=5) means follower rejects some // entries from its leader as it already has an entry with term 5 at index 100. + // (type=MsgStorageAppendResp,index=100,logTerm=5) means the local node wrote + // entries up to index=100 in stable storage, and the term of the entry at index + // 100 was 5. This doesn't always mean that the corresponding MsgStorageAppend + // message was the one that carried these entries, just that those entries were + // stable at the time of processing the corresponding MsgStorageAppend. 
optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; optional uint64 index = 6 [(gogoproto.nullable) = false]; repeated Entry entries = 7 [(gogoproto.nullable) = false]; optional uint64 commit = 8 [(gogoproto.nullable) = false]; - optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; + // (type=MsgStorageAppend,vote=5,term=10) means the local node is voting for + // peer 5 in term 10. For MsgStorageAppends, the term, vote, and commit fields + // will either all be set (to facilitate the construction of a HardState) if + // any of the fields have changed or will all be unset if none of the fields + // have changed. + optional uint64 vote = 13 [(gogoproto.nullable) = false]; + // snapshot is non-nil and non-empty for MsgSnap messages and nil for all other + // message types. However, peer nodes running older binary versions may send a + // non-nil, empty value for the snapshot field of non-MsgSnap messages. Code + // should be prepared to handle such messages. + optional Snapshot snapshot = 9 [(gogoproto.nullable) = true]; optional bool reject = 10 [(gogoproto.nullable) = false]; optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; - optional bytes context = 12; + optional bytes context = 12 [(gogoproto.nullable) = true]; + // responses are populated by a raft node to instruct storage threads on how + // to respond and who to respond to when the work associated with a message + // is complete. Populated for MsgStorageAppend and MsgStorageApply messages. + repeated Message responses = 14 [(gogoproto.nullable) = false]; } message HardState { @@ -132,13 +159,13 @@ enum ConfChangeType { message ConfChange { optional ConfChangeType type = 2 [(gogoproto.nullable) = false]; - optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID" ]; + optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"]; optional bytes context = 4; // NB: this is used only by etcd to thread through a unique identifier. // Ideally it should really use the Context instead. No counterpart to // this field exists in ConfChangeV2. - optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID" ]; + optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; } // ConfChangeSingle is an individual configuration change operation. Multiple diff --git a/vendor/go.etcd.io/raft/v3/rawnode.go b/vendor/go.etcd.io/raft/v3/rawnode.go new file mode 100644 index 0000000000000..9ca3fa2f943aa --- /dev/null +++ b/vendor/go.etcd.io/raft/v3/rawnode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
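For context on the `responses` field above: under AsyncStorageWrites, an application storage thread is expected to make the payload of a MsgStorageAppend durable and only then release the piggybacked responses. The sketch below shows that shape; `persistAndRespond` and the `persist` callback are hypothetical application code, not library API.

```go
package main

import (
	"fmt"

	pb "go.etcd.io/raft/v3/raftpb"
)

// persistAndRespond persists what a MsgStorageAppend carries, then returns
// the piggybacked responses for delivery: local responses are stepped back
// into the RawNode, remote ones are sent to peers.
func persistAndRespond(m pb.Message, persist func(pb.Message) error) ([]pb.Message, error) {
	// Entries, the HardState fields (Term/Vote/Commit), and Snapshot must all
	// be durable before any response is released.
	if err := persist(m); err != nil {
		return nil, err
	}
	return m.Responses, nil
}

func main() {
	m := pb.Message{
		Type:      pb.MsgStorageAppend,
		Responses: []pb.Message{{Type: pb.MsgStorageAppendResp}},
	}
	resps, _ := persistAndRespond(m, func(pb.Message) error { return nil })
	fmt.Println(len(resps)) // 1
}
```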
+
+package raft
+
+import (
+	"errors"
+
+	pb "go.etcd.io/raft/v3/raftpb"
+	"go.etcd.io/raft/v3/tracker"
+)
+
+// ErrStepLocalMsg is returned when trying to step a local raft message.
+var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
+
+// ErrStepPeerNotFound is returned when trying to step a response message
+// but there is no peer found in raft.trk for that node.
+var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
+
+// RawNode is a thread-unsafe Node.
+// The methods of this struct correspond to the methods of Node and are described
+// more fully there.
+type RawNode struct {
+	raft               *raft
+	asyncStorageWrites bool
+
+	// Mutable fields.
+	prevSoftSt     *SoftState
+	prevHardSt     pb.HardState
+	stepsOnAdvance []pb.Message
+}
+
+// NewRawNode instantiates a RawNode from the given configuration.
+//
+// See Bootstrap() for bootstrapping an initial state; this replaces the former
+// 'peers' argument to this method (with identical behavior). However, it is
+// recommended that instead of calling Bootstrap, applications bootstrap their
+// state manually by setting up a Storage that has a first index > 1 and which
+// stores the desired ConfState as its InitialState.
+func NewRawNode(config *Config) (*RawNode, error) {
+	r := newRaft(config)
+	rn := &RawNode{
+		raft: r,
+	}
+	rn.asyncStorageWrites = config.AsyncStorageWrites
+	ss := r.softState()
+	rn.prevSoftSt = &ss
+	rn.prevHardSt = r.hardState()
+	return rn, nil
+}
+
+// Tick advances the internal logical clock by a single tick.
+func (rn *RawNode) Tick() {
+	rn.raft.tick()
+}
+
+// TickQuiesced advances the internal logical clock by a single tick without
+// performing any other state machine processing. It allows the caller to avoid
+// periodic heartbeats and elections when all of the peers in a Raft group are
+// known to be at the same state. Expected usage is to periodically invoke Tick
+// or TickQuiesced depending on whether the group is "active" or "quiesced".
+//
+// WARNING: Be very careful about using this method as it subverts the Raft
+// state machine. You should probably be using Tick instead.
+//
+// DEPRECATED: This method will be removed in a future release.
+func (rn *RawNode) TickQuiesced() {
+	rn.raft.electionElapsed++
+}
+
+// Campaign causes this RawNode to transition to candidate state.
+func (rn *RawNode) Campaign() error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgHup,
+	})
+}
+
+// Propose proposes that data be appended to the raft log.
+func (rn *RawNode) Propose(data []byte) error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		From: rn.raft.id,
+		Entries: []pb.Entry{
+			{Data: data},
+		}})
+}
+
+// ProposeConfChange proposes a config change. See (Node).ProposeConfChange for
+// details.
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error {
+	m, err := confChangeToMsg(cc)
+	if err != nil {
+		return err
+	}
+	return rn.raft.Step(m)
+}
+
+// ApplyConfChange applies a config change to the local node. The app must call
+// this when it applies a configuration change, except when it decides to reject
+// the configuration change, in which case no call must take place.
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
+	cs := rn.raft.applyConfChange(cc.AsV2())
+	return &cs
+}
+
+// Step advances the state machine using the given message.
+func (rn *RawNode) Step(m pb.Message) error {
+	// Ignore unexpected local messages received over the network.
+ if IsLocalMsg(m.Type) && !IsLocalMsgTarget(m.From) { + return ErrStepLocalMsg + } + if IsResponseMsg(m.Type) && !IsLocalMsgTarget(m.From) && rn.raft.trk.Progress[m.From] == nil { + return ErrStepPeerNotFound + } + return rn.raft.Step(m) +} + +// Ready returns the outstanding work that the application needs to handle. This +// includes appending and applying entries or a snapshot, updating the HardState, +// and sending messages. The returned Ready() *must* be handled and subsequently +// passed back via Advance(). +func (rn *RawNode) Ready() Ready { + rd := rn.readyWithoutAccept() + rn.acceptReady(rd) + return rd +} + +// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there +// is no obligation that the Ready must be handled. +func (rn *RawNode) readyWithoutAccept() Ready { + r := rn.raft + + rd := Ready{ + Entries: r.raftLog.nextUnstableEnts(), + CommittedEntries: r.raftLog.nextCommittedEnts(rn.applyUnstableEntries()), + Messages: r.msgs, + } + if softSt := r.softState(); !softSt.equal(rn.prevSoftSt) { + // Allocate only when SoftState changes. + escapingSoftSt := softSt + rd.SoftState = &escapingSoftSt + } + if hardSt := r.hardState(); !isHardStateEqual(hardSt, rn.prevHardSt) { + rd.HardState = hardSt + } + if r.raftLog.hasNextUnstableSnapshot() { + rd.Snapshot = *r.raftLog.nextUnstableSnapshot() + } + if len(r.readStates) != 0 { + rd.ReadStates = r.readStates + } + rd.MustSync = MustSync(r.hardState(), rn.prevHardSt, len(rd.Entries)) + + if rn.asyncStorageWrites { + // If async storage writes are enabled, enqueue messages to + // local storage threads, where applicable. + if needStorageAppendMsg(r, rd) { + m := newStorageAppendMsg(r, rd) + rd.Messages = append(rd.Messages, m) + } + if needStorageApplyMsg(rd) { + m := newStorageApplyMsg(r, rd) + rd.Messages = append(rd.Messages, m) + } + } else { + // If async storage writes are disabled, immediately enqueue + // msgsAfterAppend to be sent out. The Ready struct contract + // mandates that Messages cannot be sent until after Entries + // are written to stable storage. + for _, m := range r.msgsAfterAppend { + if m.To != r.id { + rd.Messages = append(rd.Messages, m) + } + } + } + + return rd +} + +// MustSync returns true if the hard state and count of Raft entries indicate +// that a synchronous write to persistent storage is required. +func MustSync(st, prevst pb.HardState, entsnum int) bool { + // Persistent state on all servers: + // (Updated on stable storage before responding to RPCs) + // currentTerm + // votedFor + // log entries[] + return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term +} + +func needStorageAppendMsg(r *raft, rd Ready) bool { + // Return true if log entries, hard state, or a snapshot need to be written + // to stable storage. Also return true if any messages are contingent on all + // prior MsgStorageAppend being processed. + return len(rd.Entries) > 0 || + !IsEmptyHardState(rd.HardState) || + !IsEmptySnap(rd.Snapshot) || + len(r.msgsAfterAppend) > 0 +} + +func needStorageAppendRespMsg(r *raft, rd Ready) bool { + // Return true if raft needs to hear about stabilized entries or an applied + // snapshot. See the comment in newStorageAppendRespMsg, which explains why + // we check hasNextOrInProgressUnstableEnts instead of len(rd.Entries) > 0. 
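The Ready/Advance contract implemented by readyWithoutAccept is easiest to see from the application side. Below is a hedged sketch of a single-voter driver in the synchronous (non-AsyncStorageWrites) mode, bootstrapped per the recommendation in NewRawNode's comment (a Storage whose first index is > 1 and which carries the ConfState); it is illustrative, not a production event loop.

```go
package main

import (
	"fmt"

	"go.etcd.io/raft/v3"
	pb "go.etcd.io/raft/v3/raftpb"
)

// drain handles Ready structs until the node quiesces: persist HardState and
// entries, send messages, apply committed entries, then Advance.
func drain(rn *raft.RawNode, storage *raft.MemoryStorage) {
	for rn.HasReady() {
		rd := rn.Ready()
		if !raft.IsEmptyHardState(rd.HardState) {
			// A durable implementation must fsync here when rd.MustSync;
			// MemoryStorage elides durability entirely.
			if err := storage.SetHardState(rd.HardState); err != nil {
				panic(err)
			}
		}
		if err := storage.Append(rd.Entries); err != nil {
			panic(err) // entries must be stable before rd.Messages are sent
		}
		for range rd.Messages {
			// Single-voter cluster: there are no peers to send to.
		}
		for _, ent := range rd.CommittedEntries {
			fmt.Printf("applied index=%d data=%q\n", ent.Index, ent.Data)
		}
		rn.Advance(rd)
	}
}

func main() {
	storage := raft.NewMemoryStorage()
	// Bootstrap a single-voter configuration through an initial snapshot.
	if err := storage.ApplySnapshot(pb.Snapshot{Metadata: pb.SnapshotMetadata{
		Index: 1, Term: 1, ConfState: pb.ConfState{Voters: []uint64{1}},
	}}); err != nil {
		panic(err)
	}
	rn, err := raft.NewRawNode(&raft.Config{
		ID: 1, ElectionTick: 10, HeartbeatTick: 1,
		Storage: storage, MaxSizePerMsg: 1 << 20, MaxInflightMsgs: 256,
	})
	if err != nil {
		panic(err)
	}
	if err := rn.Campaign(); err != nil { // single voter wins immediately
		panic(err)
	}
	drain(rn, storage) // applies the leader's empty entry
	if err := rn.Propose([]byte("hello")); err != nil {
		panic(err)
	}
	drain(rn, storage) // commits and applies the proposal
}
```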
+ return r.raftLog.hasNextOrInProgressUnstableEnts() || + !IsEmptySnap(rd.Snapshot) +} + +// newStorageAppendMsg creates the message that should be sent to the local +// append thread to instruct it to append log entries, write an updated hard +// state, and apply a snapshot. The message also carries a set of responses +// that should be delivered after the rest of the message is processed. Used +// with AsyncStorageWrites. +func newStorageAppendMsg(r *raft, rd Ready) pb.Message { + m := pb.Message{ + Type: pb.MsgStorageAppend, + To: LocalAppendThread, + From: r.id, + Entries: rd.Entries, + } + if !IsEmptyHardState(rd.HardState) { + // If the Ready includes a HardState update, assign each of its fields + // to the corresponding fields in the Message. This allows clients to + // reconstruct the HardState and save it to stable storage. + // + // If the Ready does not include a HardState update, make sure to not + // assign a value to any of the fields so that a HardState reconstructed + // from them will be empty (return true from raft.IsEmptyHardState). + m.Term = rd.Term + m.Vote = rd.Vote + m.Commit = rd.Commit + } + if !IsEmptySnap(rd.Snapshot) { + snap := rd.Snapshot + m.Snapshot = &snap + } + // Attach all messages in msgsAfterAppend as responses to be delivered after + // the message is processed, along with a self-directed MsgStorageAppendResp + // to acknowledge the entry stability. + // + // NB: it is important for performance that MsgStorageAppendResp message be + // handled after self-directed MsgAppResp messages on the leader (which will + // be contained in msgsAfterAppend). This ordering allows the MsgAppResp + // handling to use a fast-path in r.raftLog.term() before the newly appended + // entries are removed from the unstable log. + m.Responses = r.msgsAfterAppend + if needStorageAppendRespMsg(r, rd) { + m.Responses = append(m.Responses, newStorageAppendRespMsg(r, rd)) + } + return m +} + +// newStorageAppendRespMsg creates the message that should be returned to node +// after the unstable log entries, hard state, and snapshot in the current Ready +// (along with those in all prior Ready structs) have been saved to stable +// storage. +func newStorageAppendRespMsg(r *raft, rd Ready) pb.Message { + m := pb.Message{ + Type: pb.MsgStorageAppendResp, + To: r.id, + From: LocalAppendThread, + // Dropped after term change, see below. + Term: r.Term, + } + if r.raftLog.hasNextOrInProgressUnstableEnts() { + // If the raft log has unstable entries, attach the last index and term of the + // append to the response message. This (index, term) tuple will be handed back + // and consulted when the stability of those log entries is signaled to the + // unstable. If the (index, term) match the unstable log by the time the + // response is received (unstable.stableTo), the unstable log can be truncated. + // + // However, with just this logic, there would be an ABA problem[^1] that could + // lead to the unstable log and the stable log getting out of sync temporarily + // and leading to an inconsistent view. Consider the following example with 5 + // nodes, A B C D E: + // + // 1. A is the leader. + // 2. A proposes some log entries but only B receives these entries. + // 3. B gets the Ready and the entries are appended asynchronously. + // 4. A crashes and C becomes leader after getting a vote from D and E. + // 5. C proposes some log entries and B receives these entries, overwriting the + // previous unstable log entries that are in the process of being appended. 
The entries have a larger term than the previous entries but the same
+	//    indexes. It begins appending these new entries asynchronously.
+	// 6. C crashes and A restarts and becomes leader again after getting the vote
+	//    from D and E.
+	// 7. B receives the entries from A which are the same as the ones from step 2,
+	//    overwriting the previous unstable log entries that are in the process of
+	//    being appended from step 5. The entries have the original terms and
+	//    indexes from step 2. Recall that log entries retain their original term
+	//    numbers when a leader replicates entries from previous terms. It begins
+	//    appending these new entries asynchronously.
+	// 8. The asynchronous log appends from the first Ready complete and stableTo
+	//    is called.
+	// 9. However, the log entries from the second Ready are still in the
+	//    asynchronous append pipeline and will overwrite (in stable storage) the
+	//    entries from the first Ready at some future point. We can't truncate the
+	//    unstable log yet or a future read from Storage might see the entries from
+	//    step 5 before they have been replaced by the entries from step 7.
+	//    Instead, we must wait until we are sure that the entries are stable and
+	//    that no in-progress appends might overwrite them before removing entries
+	//    from the unstable log.
+	//
+	// To prevent these kinds of problems, we also attach the current term to the
+	// MsgStorageAppendResp (above). If the term has changed by the time the
+	// MsgStorageAppendResp is returned, the response is ignored and the unstable
+	// log is not truncated. The unstable log is only truncated when the term has
+	// remained unchanged from the time that the MsgStorageAppend was sent to the
+	// time that the MsgStorageAppendResp is received, indicating that no one else
+	// is in the process of truncating the stable log.
+	//
+	// However, this replaces a correctness problem with a liveness problem. If we
+	// only attempted to truncate the unstable log when appending new entries but
+	// also occasionally dropped these responses, then quiescence of new log entries
+	// could lead to the unstable log never being truncated.
+	//
+	// To combat this, we attempt to truncate the log on all MsgStorageAppendResp
+	// messages where the unstable log is not empty, not just those associated with
+	// entry appends. This includes MsgStorageAppendResp messages associated with an
+	// updated HardState, which occur after a term change.
+	//
+	// In other words, we set Index and LogTerm in a block that looks like:
+	//
+	//  if r.raftLog.hasNextOrInProgressUnstableEnts() { ... }
+	//
+	// not like:
+	//
+	//  if len(rd.Entries) > 0 { ... }
+	//
+	// To do so, we attach r.raftLog.lastIndex() and r.raftLog.lastTerm(), not the
+	// (index, term) of the last entry in rd.Entries. If rd.Entries is not empty,
+	// these will be the same. However, if rd.Entries is empty, we still want to
+	// attest that this (index, term) is correct at the current term, in case the
+	// MsgStorageAppend that contained the last entry in the unstable slice carried
+	// an earlier term and was dropped.
+	//
+	// A MsgStorageAppend with a new term is emitted on each term change. This is
+	// the same condition that causes MsgStorageAppendResp messages with earlier
+	// terms to be ignored. As a result, we are guaranteed that, assuming a bounded
+	// number of term changes, there will eventually be a MsgStorageAppendResp
+	// message that is not ignored.
This means that entries in the unstable log
+	// which have been appended to stable storage will eventually be truncated and
+	// dropped from memory.
+	//
+	// [^1]: https://en.wikipedia.org/wiki/ABA_problem
+		m.Index = r.raftLog.lastIndex()
+		m.LogTerm = r.raftLog.lastTerm()
+	}
+	if !IsEmptySnap(rd.Snapshot) {
+		snap := rd.Snapshot
+		m.Snapshot = &snap
+	}
+	return m
+}
+
+func needStorageApplyMsg(rd Ready) bool     { return len(rd.CommittedEntries) > 0 }
+func needStorageApplyRespMsg(rd Ready) bool { return needStorageApplyMsg(rd) }
+
+// newStorageApplyMsg creates the message that should be sent to the local
+// apply thread to instruct it to apply committed log entries. The message
+// also carries a response that should be delivered after the rest of the
+// message is processed. Used with AsyncStorageWrites.
+func newStorageApplyMsg(r *raft, rd Ready) pb.Message {
+	ents := rd.CommittedEntries
+	return pb.Message{
+		Type:    pb.MsgStorageApply,
+		To:      LocalApplyThread,
+		From:    r.id,
+		Term:    0, // committed entries don't apply under a specific term
+		Entries: ents,
+		Responses: []pb.Message{
+			newStorageApplyRespMsg(r, ents),
+		},
+	}
+}
+
+// newStorageApplyRespMsg creates the message that should be returned to node
+// after the committed entries in the current Ready (along with those in all
+// prior Ready structs) have been applied to the local state machine.
+func newStorageApplyRespMsg(r *raft, ents []pb.Entry) pb.Message {
+	return pb.Message{
+		Type:    pb.MsgStorageApplyResp,
+		To:      r.id,
+		From:    LocalApplyThread,
+		Term:    0, // committed entries don't apply under a specific term
+		Entries: ents,
+	}
+}
+
+// acceptReady is called when the consumer of the RawNode has decided to go
+// ahead and handle a Ready. Nothing must alter the state of the RawNode between
+// this call and the prior call to Ready().
+func (rn *RawNode) acceptReady(rd Ready) {
+	if rd.SoftState != nil {
+		rn.prevSoftSt = rd.SoftState
+	}
+	if !IsEmptyHardState(rd.HardState) {
+		rn.prevHardSt = rd.HardState
+	}
+	if len(rd.ReadStates) != 0 {
+		rn.raft.readStates = nil
+	}
+	if !rn.asyncStorageWrites {
+		if len(rn.stepsOnAdvance) != 0 {
+			rn.raft.logger.Panicf("two accepted Ready structs without call to Advance")
+		}
+		for _, m := range rn.raft.msgsAfterAppend {
+			if m.To == rn.raft.id {
+				rn.stepsOnAdvance = append(rn.stepsOnAdvance, m)
+			}
+		}
+		if needStorageAppendRespMsg(rn.raft, rd) {
+			m := newStorageAppendRespMsg(rn.raft, rd)
+			rn.stepsOnAdvance = append(rn.stepsOnAdvance, m)
+		}
+		if needStorageApplyRespMsg(rd) {
+			m := newStorageApplyRespMsg(rn.raft, rd.CommittedEntries)
+			rn.stepsOnAdvance = append(rn.stepsOnAdvance, m)
+		}
+	}
+	rn.raft.msgs = nil
+	rn.raft.msgsAfterAppend = nil
+	rn.raft.raftLog.acceptUnstable()
+	if len(rd.CommittedEntries) > 0 {
+		ents := rd.CommittedEntries
+		index := ents[len(ents)-1].Index
+		rn.raft.raftLog.acceptApplying(index, entsSize(ents), rn.applyUnstableEntries())
+	}
+}
+
+// applyUnstableEntries returns whether entries are allowed to be applied once
+// they are known to be committed but before they have been written locally to
+// stable storage.
+func (rn *RawNode) applyUnstableEntries() bool {
+	return !rn.asyncStorageWrites
+}
+
+// HasReady reports whether there is a pending Ready for the application to handle.
+func (rn *RawNode) HasReady() bool {
+	// TODO(nvanbenschoten): order these cases in terms of cost and frequency.
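The term guard that newStorageAppendRespMsg describes can be reduced to a toy model: a response carrying a stale term must not truncate the unstable log, because an in-flight append from a newer term may still overwrite those indexes in stable storage. The types below are invented for illustration and are not the library's unstable-log implementation.

```go
package main

import "fmt"

type unstableLog struct {
	term    uint64 // raft term current when the append was issued
	entries int    // number of unstable entries (simplified)
}

// stableTo truncates only if the response's term matches the term at which
// the corresponding MsgStorageAppend was sent; a stale response is ignored,
// and a later response carrying the current term will do the truncation.
func (u *unstableLog) stableTo(respTerm uint64, n int) {
	if respTerm != u.term {
		return // stale: an in-flight append may overwrite these entries
	}
	if n > u.entries {
		n = u.entries
	}
	u.entries -= n
}

func main() {
	u := unstableLog{term: 7, entries: 3}
	u.stableTo(5, 3)       // response from an older term is ignored
	fmt.Println(u.entries) // 3
	u.stableTo(7, 3)       // response from the current term truncates
	fmt.Println(u.entries) // 0
}
```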
+ r := rn.raft + if softSt := r.softState(); !softSt.equal(rn.prevSoftSt) { + return true + } + if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) { + return true + } + if r.raftLog.hasNextUnstableSnapshot() { + return true + } + if len(r.msgs) > 0 || len(r.msgsAfterAppend) > 0 { + return true + } + if r.raftLog.hasNextUnstableEnts() || r.raftLog.hasNextCommittedEnts(rn.applyUnstableEntries()) { + return true + } + if len(r.readStates) != 0 { + return true + } + return false +} + +// Advance notifies the RawNode that the application has applied and saved progress in the +// last Ready results. +// +// NOTE: Advance must not be called when using AsyncStorageWrites. Response messages from +// the local append and apply threads take its place. +func (rn *RawNode) Advance(_ Ready) { + // The actions performed by this function are encoded into stepsOnAdvance in + // acceptReady. In earlier versions of this library, they were computed from + // the provided Ready struct. Retain the unused parameter for compatibility. + if rn.asyncStorageWrites { + rn.raft.logger.Panicf("Advance must not be called when using AsyncStorageWrites") + } + for i, m := range rn.stepsOnAdvance { + _ = rn.raft.Step(m) + rn.stepsOnAdvance[i] = pb.Message{} + } + rn.stepsOnAdvance = rn.stepsOnAdvance[:0] +} + +// Status returns the current status of the given group. This allocates, see +// BasicStatus and WithProgress for allocation-friendlier choices. +func (rn *RawNode) Status() Status { + status := getStatus(rn.raft) + return status +} + +// BasicStatus returns a BasicStatus. Notably this does not contain the +// Progress map; see WithProgress for an allocation-free way to inspect it. +func (rn *RawNode) BasicStatus() BasicStatus { + return getBasicStatus(rn.raft) +} + +// ProgressType indicates the type of replica a Progress corresponds to. +type ProgressType byte + +const ( + // ProgressTypePeer accompanies a Progress for a regular peer replica. + ProgressTypePeer ProgressType = iota + // ProgressTypeLearner accompanies a Progress for a learner replica. + ProgressTypeLearner +) + +// WithProgress is a helper to introspect the Progress for this node and its +// peers. +func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) { + rn.raft.trk.Visit(func(id uint64, pr *tracker.Progress) { + typ := ProgressTypePeer + if pr.IsLearner { + typ = ProgressTypeLearner + } + p := *pr + p.Inflights = nil + visitor(id, typ, p) + }) +} + +// ReportUnreachable reports the given node is not reachable for the last send. +func (rn *RawNode) ReportUnreachable(id uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id}) +} + +// ReportSnapshot reports the status of the sent snapshot. +func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}) +} + +// TransferLeader tries to transfer leadership to the given transferee. +func (rn *RawNode) TransferLeader(transferee uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee}) +} + +// ForgetLeader forgets a follower's current leader, changing it to None. +// See (Node).ForgetLeader for details. +func (rn *RawNode) ForgetLeader() error { + return rn.raft.Step(pb.Message{Type: pb.MsgForgetLeader}) +} + +// ReadIndex requests a read state. The read state will be set in ready. +// Read State has a read index. 
Once the application advances further than the read +// index, any linearizable read requests issued before the read request can be +// processed safely. The read state will have the same rctx attached. +func (rn *RawNode) ReadIndex(rctx []byte) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) +} diff --git a/vendor/go.etcd.io/etcd/raft/v3/read_only.go b/vendor/go.etcd.io/raft/v3/read_only.go similarity index 97% rename from vendor/go.etcd.io/etcd/raft/v3/read_only.go rename to vendor/go.etcd.io/raft/v3/read_only.go index ad0688522d6f7..5a53f0ca80207 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/read_only.go +++ b/vendor/go.etcd.io/raft/v3/read_only.go @@ -14,7 +14,7 @@ package raft -import pb "go.etcd.io/etcd/raft/v3/raftpb" +import pb "go.etcd.io/raft/v3/raftpb" // ReadState provides state for read only query. // It's caller's responsibility to call ReadIndex first before getting @@ -85,7 +85,7 @@ func (ro *readOnly) advance(m pb.Message) []*readIndexStatus { ) ctx := string(m.Context) - rss := []*readIndexStatus{} + var rss []*readIndexStatus for _, okctx := range ro.readIndexQueue { i++ diff --git a/vendor/go.etcd.io/etcd/raft/v3/status.go b/vendor/go.etcd.io/raft/v3/status.go similarity index 92% rename from vendor/go.etcd.io/etcd/raft/v3/status.go rename to vendor/go.etcd.io/raft/v3/status.go index acfb56c3915d9..ded0c0ec6c56c 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/status.go +++ b/vendor/go.etcd.io/raft/v3/status.go @@ -17,8 +17,8 @@ package raft import ( "fmt" - pb "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/raft/v3/tracker" + pb "go.etcd.io/raft/v3/raftpb" + "go.etcd.io/raft/v3/tracker" ) // Status contains information about this Raft peer and its view of the system. @@ -43,7 +43,7 @@ type BasicStatus struct { func getProgressCopy(r *raft) map[uint64]tracker.Progress { m := make(map[uint64]tracker.Progress) - r.prs.Visit(func(id uint64, pr *tracker.Progress) { + r.trk.Visit(func(id uint64, pr *tracker.Progress) { p := *pr p.Inflights = pr.Inflights.Clone() pr = nil @@ -59,7 +59,7 @@ func getBasicStatus(r *raft) BasicStatus { LeadTransferee: r.leadTransferee, } s.HardState = r.hardState() - s.SoftState = *r.softState() + s.SoftState = r.softState() s.Applied = r.raftLog.applied return s } @@ -71,7 +71,7 @@ func getStatus(r *raft) Status { if s.RaftState == StateLeader { s.Progress = getProgressCopy(r) } - s.Config = r.prs.Config.Clone() + s.Config = r.trk.Config.Clone() return s } diff --git a/vendor/go.etcd.io/etcd/raft/v3/storage.go b/vendor/go.etcd.io/raft/v3/storage.go similarity index 78% rename from vendor/go.etcd.io/etcd/raft/v3/storage.go rename to vendor/go.etcd.io/raft/v3/storage.go index 8b16d4fa24faf..f616c3150fbdf 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/storage.go +++ b/vendor/go.etcd.io/raft/v3/storage.go @@ -18,7 +18,7 @@ import ( "errors" "sync" - pb "go.etcd.io/etcd/raft/v3/raftpb" + pb "go.etcd.io/raft/v3/raftpb" ) // ErrCompacted is returned by Storage.Entries/Compact when a requested @@ -48,10 +48,28 @@ type Storage interface { // InitialState returns the saved HardState and ConfState information. InitialState() (pb.HardState, pb.ConfState, error) - // Entries returns a slice of log entries in the range [lo,hi). - // MaxSize limits the total size of the log entries returned, but - // Entries returns at least one entry if any. + + // Entries returns a slice of consecutive log entries in the range [lo, hi), + // starting from lo. 
The maxSize limits the total size of the log entries + // returned, but Entries returns at least one entry if any. + // + // The caller of Entries owns the returned slice, and may append to it. The + // individual entries in the slice must not be mutated, neither by the Storage + // implementation nor the caller. Note that raft may forward these entries + // back to the application via Ready struct, so the corresponding handler must + // not mutate entries either (see comments in Ready struct). + // + // Since the caller may append to the returned slice, Storage implementation + // must protect its state from corruption that such appends may cause. For + // example, common ways to do so are: + // - allocate the slice before returning it (safest option), + // - return a slice protected by Go full slice expression, which causes + // copying on appends (see MemoryStorage). + // + // Returns ErrCompacted if entry lo has been compacted, or ErrUnavailable if + // encountered an unavailable entry in [lo, hi). Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) + // Term returns the term of entry i, which must be in the range // [FirstIndex()-1, LastIndex()]. The term of the entry before // FirstIndex is retained for matching purposes even though the @@ -71,6 +89,10 @@ type Storage interface { Snapshot() (pb.Snapshot, error) } +type inMemStorageCallStats struct { + initialState, firstIndex, lastIndex, entries, term, snapshot int +} + // MemoryStorage implements the Storage interface backed by an // in-memory array. type MemoryStorage struct { @@ -83,6 +105,8 @@ type MemoryStorage struct { snapshot pb.Snapshot // ents[i] has raft log position i+snapshot.Metadata.Index ents []pb.Entry + + callStats inMemStorageCallStats } // NewMemoryStorage creates an empty MemoryStorage. @@ -95,6 +119,7 @@ func NewMemoryStorage() *MemoryStorage { // InitialState implements the Storage interface. func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { + ms.callStats.initialState++ return ms.hardState, ms.snapshot.Metadata.ConfState, nil } @@ -110,6 +135,7 @@ func (ms *MemoryStorage) SetHardState(st pb.HardState) error { func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { ms.Lock() defer ms.Unlock() + ms.callStats.entries++ offset := ms.ents[0].Index if lo <= offset { return nil, ErrCompacted @@ -122,14 +148,18 @@ func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { return nil, ErrUnavailable } - ents := ms.ents[lo-offset : hi-offset] - return limitSize(ents, maxSize), nil + ents := limitSize(ms.ents[lo-offset:hi-offset], entryEncodingSize(maxSize)) + // NB: use the full slice expression to limit what the caller can do with the + // returned slice. For example, an append will reallocate and copy this slice + // instead of corrupting the neighbouring ms.ents. + return ents[:len(ents):len(ents)], nil } // Term implements the Storage interface. 
func (ms *MemoryStorage) Term(i uint64) (uint64, error) { ms.Lock() defer ms.Unlock() + ms.callStats.term++ offset := ms.ents[0].Index if i < offset { return 0, ErrCompacted @@ -144,6 +174,7 @@ func (ms *MemoryStorage) Term(i uint64) (uint64, error) { func (ms *MemoryStorage) LastIndex() (uint64, error) { ms.Lock() defer ms.Unlock() + ms.callStats.lastIndex++ return ms.lastIndex(), nil } @@ -155,6 +186,7 @@ func (ms *MemoryStorage) lastIndex() uint64 { func (ms *MemoryStorage) FirstIndex() (uint64, error) { ms.Lock() defer ms.Unlock() + ms.callStats.firstIndex++ return ms.firstIndex(), nil } @@ -166,6 +198,7 @@ func (ms *MemoryStorage) firstIndex() uint64 { func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { ms.Lock() defer ms.Unlock() + ms.callStats.snapshot++ return ms.snapshot, nil } @@ -227,7 +260,10 @@ func (ms *MemoryStorage) Compact(compactIndex uint64) error { } i := compactIndex - offset - ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) + // NB: allocate a new slice instead of reusing the old ms.ents. Entries in + // ms.ents are immutable, and can be referenced from outside MemoryStorage + // through slices returned by ms.Entries(). + ents := make([]pb.Entry, 1, uint64(len(ms.ents))-i) ents[0].Index = ms.ents[i].Index ents[0].Term = ms.ents[i].Term ents = append(ents, ms.ents[i+1:]...) @@ -261,8 +297,9 @@ func (ms *MemoryStorage) Append(entries []pb.Entry) error { offset := entries[0].Index - ms.ents[0].Index switch { case uint64(len(ms.ents)) > offset: - ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) - ms.ents = append(ms.ents, entries...) + // NB: full slice expression protects ms.ents at index >= offset from + // rewrites, as they may still be referenced from outside MemoryStorage. + ms.ents = append(ms.ents[:offset:offset], entries...) case uint64(len(ms.ents)) == offset: ms.ents = append(ms.ents, entries...) default: diff --git a/vendor/go.etcd.io/etcd/raft/v3/tracker/inflights.go b/vendor/go.etcd.io/raft/v3/tracker/inflights.go similarity index 62% rename from vendor/go.etcd.io/etcd/raft/v3/tracker/inflights.go rename to vendor/go.etcd.io/raft/v3/tracker/inflights.go index 1a056341ab517..cb091e542a3ec 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/tracker/inflights.go +++ b/vendor/go.etcd.io/raft/v3/tracker/inflights.go @@ -14,6 +14,12 @@ package tracker +// inflight describes an in-flight MsgApp message. +type inflight struct { + index uint64 // the index of the last entry inside the message + bytes uint64 // the total byte size of the entries in the message +} + // Inflights limits the number of MsgApp (represented by the largest index // contained within) sent to followers but not yet acknowledged by them. Callers // use Full() to check whether more messages can be sent, call Add() whenever @@ -22,21 +28,25 @@ package tracker type Inflights struct { // the starting index in the buffer start int - // number of inflights in the buffer - count int - // the size of the buffer - size int + count int // number of inflight messages in the buffer + bytes uint64 // number of inflight bytes + + size int // the max number of inflight messages + maxBytes uint64 // the max total byte size of inflight messages - // buffer contains the index of the last entry - // inside one message. - buffer []uint64 + // buffer is a ring buffer containing info about all in-flight messages. + buffer []inflight } -// NewInflights sets up an Inflights that allows up to 'size' inflight messages. 
-func NewInflights(size int) *Inflights { +// NewInflights sets up an Inflights that allows up to size inflight messages, +// with the total byte size up to maxBytes. If maxBytes is 0 then there is no +// byte size limit. The maxBytes limit is soft, i.e. we accept a single message +// that brings it from size < maxBytes to size >= maxBytes. +func NewInflights(size int, maxBytes uint64) *Inflights { return &Inflights{ - size: size, + size: size, + maxBytes: maxBytes, } } @@ -44,15 +54,15 @@ func NewInflights(size int) *Inflights { // the receiver. func (in *Inflights) Clone() *Inflights { ins := *in - ins.buffer = append([]uint64(nil), in.buffer...) + ins.buffer = append([]inflight(nil), in.buffer...) return &ins } -// Add notifies the Inflights that a new message with the given index is being -// dispatched. Full() must be called prior to Add() to verify that there is room -// for one more message, and consecutive calls to add Add() must provide a -// monotonic sequence of indexes. -func (in *Inflights) Add(inflight uint64) { +// Add notifies the Inflights that a new message with the given index and byte +// size is being dispatched. Full() must be called prior to Add() to verify that +// there is room for one more message, and consecutive calls to Add() must +// provide a monotonic sequence of indexes. +func (in *Inflights) Add(index, bytes uint64) { if in.Full() { panic("cannot add into a Full inflights") } @@ -64,8 +74,9 @@ func (in *Inflights) Add(inflight uint64) { if next >= len(in.buffer) { in.grow() } - in.buffer[next] = inflight + in.buffer[next] = inflight{index: index, bytes: bytes} in.count++ + in.bytes += bytes } // grow the inflight buffer by doubling up to inflights.size. We grow on demand @@ -78,24 +89,26 @@ func (in *Inflights) grow() { } else if newSize > in.size { newSize = in.size } - newBuffer := make([]uint64, newSize) + newBuffer := make([]inflight, newSize) copy(newBuffer, in.buffer) in.buffer = newBuffer } // FreeLE frees the inflights smaller or equal to the given `to` flight. func (in *Inflights) FreeLE(to uint64) { - if in.count == 0 || to < in.buffer[in.start] { + if in.count == 0 || to < in.buffer[in.start].index { // out of the left side of the window return } idx := in.start var i int + var bytes uint64 for i = 0; i < in.count; i++ { - if to < in.buffer[idx] { // found the first large inflight + if to < in.buffer[idx].index { // found the first large inflight break } + bytes += in.buffer[idx].bytes // increase index and maybe rotate size := in.size @@ -105,6 +118,7 @@ func (in *Inflights) FreeLE(to uint64) { } // free i inflights and set new start index in.count -= i + in.bytes -= bytes in.start = idx if in.count == 0 { // inflights is empty, reset the start index so that we don't grow the @@ -113,13 +127,9 @@ func (in *Inflights) FreeLE(to uint64) { } } -// FreeFirstOne releases the first inflight. This is a no-op if nothing is -// inflight. -func (in *Inflights) FreeFirstOne() { in.FreeLE(in.buffer[in.start]) } - // Full returns true if no more messages can be sent at the moment. func (in *Inflights) Full() bool { - return in.count == in.size + return in.count == in.size || (in.maxBytes != 0 && in.bytes >= in.maxBytes) } // Count returns the number of inflight messages. @@ -127,6 +137,7 @@ func (in *Inflights) Count() int { return in.count } // reset frees all inflights. 
 func (in *Inflights) reset() {
-	in.count = 0
 	in.start = 0
+	in.count = 0
+	in.bytes = 0
 }
diff --git a/vendor/go.etcd.io/etcd/raft/v3/tracker/progress.go b/vendor/go.etcd.io/raft/v3/tracker/progress.go
similarity index 70%
rename from vendor/go.etcd.io/etcd/raft/v3/tracker/progress.go
rename to vendor/go.etcd.io/raft/v3/tracker/progress.go
index a36e5261ac720..4002b1ec3543a 100644
--- a/vendor/go.etcd.io/etcd/raft/v3/tracker/progress.go
+++ b/vendor/go.etcd.io/raft/v3/tracker/progress.go
@@ -42,24 +42,42 @@ type Progress struct {
 	// before and stops sending any replication message.
 	State StateType
 
-	// PendingSnapshot is used in StateSnapshot.
-	// If there is a pending snapshot, the pendingSnapshot will be set to the
-	// index of the snapshot. If pendingSnapshot is set, the replication process of
-	// this Progress will be paused. raft will not resend snapshot until the pending one
-	// is reported to be failed.
+	// PendingSnapshot is used in StateSnapshot and tracks the last index of the
+	// leader at the time at which it realized a snapshot was necessary. This
+	// matches the index in the MsgSnap message emitted from raft.
+	//
+	// While there is a pending snapshot, replication to the follower is paused.
+	// The follower will transition back to StateReplicate if the leader
+	// receives an MsgAppResp from it that reconnects the follower to the
+	// leader's log (such an MsgAppResp is emitted when the follower applies a
+	// snapshot). It may be surprising that PendingSnapshot is not taken into
+	// account here, but consider that complex systems may delegate the sending
+	// of snapshots to alternative datasources (i.e. not the leader). In such
+	// setups, it is difficult to manufacture a snapshot at a particular index
+	// requested by raft and the actual index may be ahead or behind. This
+	// should be okay, as long as the snapshot allows replication to resume.
+	//
+	// The follower will transition to StateProbe if ReportSnapshot is called on
+	// the leader; if SnapshotFinish is passed then PendingSnapshot becomes the
+	// basis for the next attempt to append. In practice, the first mechanism is
+	// the one that is relevant in most cases. However, if this MsgAppResp is
+	// lost (fallible network) then the second mechanism ensures that in this
+	// case the follower does not erroneously remain in StateSnapshot.
 	PendingSnapshot uint64
 
 	// RecentActive is true if the progress is recently active. Receiving any messages
 	// from the corresponding follower indicates the progress is active.
 	// RecentActive can be reset to false after an election timeout.
-	//
-	// TODO(tbg): the leader should always have this set to true.
+	// This is always true on the leader.
 	RecentActive bool
 
-	// ProbeSent is used while this follower is in StateProbe. When ProbeSent is
-	// true, raft should pause sending replication message to this peer until
-	// ProbeSent is reset. See ProbeAcked() and IsPaused().
-	ProbeSent bool
+	// MsgAppFlowPaused is used when the MsgApp flow to a node is throttled. This
+	// happens in StateProbe, or StateReplicate with saturated Inflights. In both
+	// cases, we need to continue sending MsgApp once in a while to guarantee
+	// progress, but we only do so when MsgAppFlowPaused is false (it is reset on
+	// receiving a heartbeat response), to not overflow the receiver. See
+	// IsPaused().
+	MsgAppFlowPaused bool
 
 	// Inflights is a sliding window for the inflight messages.
 	// Each inflight message contains one or more log entries.
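The inflights.go hunks above turn the window from a plain message counter into a count-plus-bytes budget. As a quick illustration, here is a hand-written sketch of the new behaviour (not code from this patch; it only assumes the renamed go.etcd.io/raft/v3/tracker import path and the exported API shown in this diff):

package main

import (
	"fmt"

	"go.etcd.io/raft/v3/tracker"
)

func main() {
	// Allow up to 4 in-flight MsgApps, but at most 10 payload bytes in total.
	in := tracker.NewInflights(4, 10)

	in.Add(1, 6) // message ending at index 1, 6 bytes tracked
	in.Add(2, 6) // 12 bytes in flight; the byte limit is soft, so one overshoot is accepted
	fmt.Println(in.Full()) // true: bytes >= maxBytes even though count < size

	// Acking everything up to index 1 frees the first message and its 6 bytes.
	in.FreeLE(1)
	fmt.Println(in.Full(), in.Count()) // false 1
}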
@@ -79,10 +97,10 @@ type Progress struct {
 	IsLearner bool
 }
 
-// ResetState moves the Progress into the specified State, resetting ProbeSent,
+// ResetState moves the Progress into the specified State, resetting MsgAppFlowPaused,
 // PendingSnapshot, and Inflights.
 func (pr *Progress) ResetState(state StateType) {
-	pr.ProbeSent = false
+	pr.MsgAppFlowPaused = false
 	pr.PendingSnapshot = 0
 	pr.State = state
 	pr.Inflights.reset()
@@ -102,13 +120,6 @@ func min(a, b uint64) uint64 {
 	return a
 }
 
-// ProbeAcked is called when this peer has accepted an append. It resets
-// ProbeSent to signal that additional append messages should be sent without
-// further delay.
-func (pr *Progress) ProbeAcked() {
-	pr.ProbeSent = false
-}
-
 // BecomeProbe transitions into StateProbe. Next is reset to Match+1 or,
 // optionally and if larger, the index of the pending snapshot.
 func (pr *Progress) BecomeProbe() {
@@ -138,6 +149,33 @@ func (pr *Progress) BecomeSnapshot(snapshoti uint64) {
 	pr.PendingSnapshot = snapshoti
 }
 
+// UpdateOnEntriesSend updates the progress on the given number of consecutive
+// entries being sent in a MsgApp, with the given total bytes size, appended at
+// and after the given log index.
+func (pr *Progress) UpdateOnEntriesSend(entries int, bytes, nextIndex uint64) error {
+	switch pr.State {
+	case StateReplicate:
+		if entries > 0 {
+			last := nextIndex + uint64(entries) - 1
+			pr.OptimisticUpdate(last)
+			pr.Inflights.Add(last, bytes)
+		}
+		// If this message overflows the in-flights tracker, or it was already full,
+		// consider this message being a probe, so that the flow is paused.
+		pr.MsgAppFlowPaused = pr.Inflights.Full()
+	case StateProbe:
+		// TODO(pavelkalinnikov): this condition captures the previous behaviour,
+		// but we should set MsgAppFlowPaused unconditionally for simplicity, because any
+		// MsgApp in StateProbe is a probe, not only non-empty ones.
+		if entries > 0 {
+			pr.MsgAppFlowPaused = true
+		}
+	default:
+		return fmt.Errorf("sending append in unhandled state %s", pr.State)
+	}
+	return nil
+}
+
 // MaybeUpdate is called when an MsgAppResp arrives from the follower, with the
 // index acked by it. The method returns false if the given n index comes from
 // an outdated message. Otherwise it updates the progress and returns true.
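UpdateOnEntriesSend is the single hook through which a leader's send path now feeds the flow-control state. The following is a hand-written sketch of how such a send path could drive it (not code from this patch: sendAppend and its wiring are invented for illustration, while Progress, Inflights, and the raftpb Entry type are the ones shown in the surrounding diff):

package main

import (
	"fmt"

	pb "go.etcd.io/raft/v3/raftpb"
	"go.etcd.io/raft/v3/tracker"
)

// sendAppend is an invented helper marking where UpdateOnEntriesSend slots in:
// once per MsgApp, just before the message is handed to the transport.
func sendAppend(pr *tracker.Progress, ents []pb.Entry) error {
	var bytes uint64
	for _, e := range ents {
		bytes += uint64(e.Size())
	}
	// In StateReplicate this bumps Next optimistically and records the message
	// in Inflights (setting MsgAppFlowPaused when the window fills up); in
	// StateProbe a non-empty MsgApp marks the probe as sent. Other states
	// should not be sending appends at all, hence the error return.
	if err := pr.UpdateOnEntriesSend(len(ents), bytes, pr.Next); err != nil {
		return err
	}
	// ... emit the MsgApp here; subsequent sends consult pr.IsPaused().
	return nil
}

func main() {
	pr := &tracker.Progress{
		State:     tracker.StateReplicate,
		Next:      5,
		Inflights: tracker.NewInflights(1, 0), // a window of a single message
	}
	if err := sendAppend(pr, []pb.Entry{{Index: 5, Term: 1}}); err != nil {
		panic(err)
	}
	fmt.Println(pr.Next, pr.IsPaused()) // 6 true: the only inflight slot is taken
}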
@@ -146,7 +184,7 @@ func (pr *Progress) MaybeUpdate(n uint64) bool { if pr.Match < n { pr.Match = n updated = true - pr.ProbeAcked() + pr.MsgAppFlowPaused = false } pr.Next = max(pr.Next, n+1) return updated @@ -188,7 +226,7 @@ func (pr *Progress) MaybeDecrTo(rejected, matchHint uint64) bool { } pr.Next = max(min(rejected, matchHint+1), 1) - pr.ProbeSent = false + pr.MsgAppFlowPaused = false return true } @@ -201,9 +239,9 @@ func (pr *Progress) MaybeDecrTo(rejected, matchHint uint64) bool { func (pr *Progress) IsPaused() bool { switch pr.State { case StateProbe: - return pr.ProbeSent + return pr.MsgAppFlowPaused case StateReplicate: - return pr.Inflights.Full() + return pr.MsgAppFlowPaused case StateSnapshot: return true default: @@ -224,7 +262,7 @@ func (pr *Progress) String() string { fmt.Fprintf(&buf, " pendingSnap=%d", pr.PendingSnapshot) } if !pr.RecentActive { - fmt.Fprintf(&buf, " inactive") + fmt.Fprint(&buf, " inactive") } if n := pr.Inflights.Count(); n > 0 { fmt.Fprintf(&buf, " inflight=%d", n) diff --git a/vendor/go.etcd.io/etcd/raft/v3/tracker/state.go b/vendor/go.etcd.io/raft/v3/tracker/state.go similarity index 95% rename from vendor/go.etcd.io/etcd/raft/v3/tracker/state.go rename to vendor/go.etcd.io/raft/v3/tracker/state.go index 285b4b8f58021..7dbdd63fa666c 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/tracker/state.go +++ b/vendor/go.etcd.io/raft/v3/tracker/state.go @@ -39,4 +39,4 @@ var prstmap = [...]string{ "StateSnapshot", } -func (st StateType) String() string { return prstmap[uint64(st)] } +func (st StateType) String() string { return prstmap[st] } diff --git a/vendor/go.etcd.io/etcd/raft/v3/tracker/tracker.go b/vendor/go.etcd.io/raft/v3/tracker/tracker.go similarity index 96% rename from vendor/go.etcd.io/etcd/raft/v3/tracker/tracker.go rename to vendor/go.etcd.io/raft/v3/tracker/tracker.go index 72dcc73b8667e..2710a7a4ffedc 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/tracker/tracker.go +++ b/vendor/go.etcd.io/raft/v3/tracker/tracker.go @@ -19,8 +19,8 @@ import ( "sort" "strings" - "go.etcd.io/etcd/raft/v3/quorum" - pb "go.etcd.io/etcd/raft/v3/raftpb" + "go.etcd.io/raft/v3/quorum" + pb "go.etcd.io/raft/v3/raftpb" ) // Config reflects the configuration tracked in a ProgressTracker. @@ -87,7 +87,7 @@ func (c Config) String() string { fmt.Fprintf(&buf, " learners_next=%s", quorum.MajorityConfig(c.LearnersNext).String()) } if c.AutoLeave { - fmt.Fprintf(&buf, " autoleave") + fmt.Fprint(&buf, " autoleave") } return buf.String() } @@ -121,13 +121,15 @@ type ProgressTracker struct { Votes map[uint64]bool - MaxInflight int + MaxInflight int + MaxInflightBytes uint64 } // MakeProgressTracker initializes a ProgressTracker. 
-func MakeProgressTracker(maxInflight int) ProgressTracker { +func MakeProgressTracker(maxInflight int, maxBytes uint64) ProgressTracker { p := ProgressTracker{ - MaxInflight: maxInflight, + MaxInflight: maxInflight, + MaxInflightBytes: maxBytes, Config: Config{ Voters: quorum.JointConfig{ quorum.MajorityConfig{}, diff --git a/vendor/go.etcd.io/etcd/raft/v3/util.go b/vendor/go.etcd.io/raft/v3/util.go similarity index 58% rename from vendor/go.etcd.io/etcd/raft/v3/util.go rename to vendor/go.etcd.io/raft/v3/util.go index 94ab368f608b4..44dde5c568615 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/util.go +++ b/vendor/go.etcd.io/raft/v3/util.go @@ -19,7 +19,7 @@ import ( "fmt" "strings" - pb "go.etcd.io/etcd/raft/v3/raftpb" + pb "go.etcd.io/raft/v3/raftpb" ) func (st StateType) MarshalJSON() ([]byte, error) { @@ -40,13 +40,44 @@ func max(a, b uint64) uint64 { return b } +var isLocalMsg = [...]bool{ + pb.MsgHup: true, + pb.MsgBeat: true, + pb.MsgUnreachable: true, + pb.MsgSnapStatus: true, + pb.MsgCheckQuorum: true, + pb.MsgStorageAppend: true, + pb.MsgStorageAppendResp: true, + pb.MsgStorageApply: true, + pb.MsgStorageApplyResp: true, +} + +var isResponseMsg = [...]bool{ + pb.MsgAppResp: true, + pb.MsgVoteResp: true, + pb.MsgHeartbeatResp: true, + pb.MsgUnreachable: true, + pb.MsgReadIndexResp: true, + pb.MsgPreVoteResp: true, + pb.MsgStorageAppendResp: true, + pb.MsgStorageApplyResp: true, +} + +func isMsgInArray(msgt pb.MessageType, arr []bool) bool { + i := int(msgt) + return i < len(arr) && arr[i] +} + func IsLocalMsg(msgt pb.MessageType) bool { - return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || - msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum + return isMsgInArray(msgt, isLocalMsg[:]) } func IsResponseMsg(msgt pb.MessageType) bool { - return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp + return isMsgInArray(msgt, isResponseMsg[:]) +} + +func IsLocalMsgTarget(id uint64) bool { + return id == LocalAppendThread || id == LocalApplyThread } // voteResponseType maps vote and prevote message types to their corresponding responses. @@ -132,33 +163,54 @@ type EntryFormatter func([]byte) string // Message for debugging. func DescribeMessage(m pb.Message, f EntryFormatter) string { var buf bytes.Buffer - fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) + fmt.Fprintf(&buf, "%s->%s %v Term:%d Log:%d/%d", + describeTarget(m.From), describeTarget(m.To), m.Type, m.Term, m.LogTerm, m.Index) if m.Reject { fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint) } if m.Commit != 0 { fmt.Fprintf(&buf, " Commit:%d", m.Commit) } + if m.Vote != 0 { + fmt.Fprintf(&buf, " Vote:%d", m.Vote) + } if len(m.Entries) > 0 { - fmt.Fprintf(&buf, " Entries:[") + fmt.Fprint(&buf, " Entries:[") for i, e := range m.Entries { if i != 0 { buf.WriteString(", ") } buf.WriteString(DescribeEntry(e, f)) } - fmt.Fprintf(&buf, "]") + fmt.Fprint(&buf, "]") + } + if s := m.Snapshot; s != nil && !IsEmptySnap(*s) { + fmt.Fprintf(&buf, " Snapshot: %s", DescribeSnapshot(*s)) } - if !IsEmptySnap(m.Snapshot) { - fmt.Fprintf(&buf, " Snapshot: %s", DescribeSnapshot(m.Snapshot)) + if len(m.Responses) > 0 { + fmt.Fprintf(&buf, " Responses:[") + for i, m := range m.Responses { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteString(DescribeMessage(m, f)) + } + fmt.Fprintf(&buf, "]") } return buf.String() } -// PayloadSize is the size of the payload of this Entry. 
Notably, it does not -// depend on its Index or Term. -func PayloadSize(e pb.Entry) int { - return len(e.Data) +func describeTarget(id uint64) string { + switch id { + case None: + return "None" + case LocalAppendThread: + return "AppendThread" + case LocalApplyThread: + return "ApplyThread" + default: + return fmt.Sprintf("%x", id) + } } // DescribeEntry returns a concise human-readable description of an @@ -209,19 +261,54 @@ func DescribeEntries(ents []pb.Entry, f EntryFormatter) string { return buf.String() } -func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { +// entryEncodingSize represents the protocol buffer encoding size of one or more +// entries. +type entryEncodingSize uint64 + +func entsSize(ents []pb.Entry) entryEncodingSize { + var size entryEncodingSize + for _, ent := range ents { + size += entryEncodingSize(ent.Size()) + } + return size +} + +// limitSize returns the longest prefix of the given entries slice, such that +// its total byte size does not exceed maxSize. Always returns a non-empty slice +// if the input is non-empty, so, as an exception, if the size of the first +// entry exceeds maxSize, a non-empty slice with just this entry is returned. +func limitSize(ents []pb.Entry, maxSize entryEncodingSize) []pb.Entry { if len(ents) == 0 { return ents } size := ents[0].Size() - var limit int - for limit = 1; limit < len(ents); limit++ { + for limit := 1; limit < len(ents); limit++ { size += ents[limit].Size() - if uint64(size) > maxSize { - break + if entryEncodingSize(size) > maxSize { + return ents[:limit] } } - return ents[:limit] + return ents +} + +// entryPayloadSize represents the size of one or more entries' payloads. +// Notably, it does not depend on its Index or Term. Entries with empty +// payloads, like those proposed after a leadership change, are considered +// to be zero size. +type entryPayloadSize uint64 + +// payloadSize is the size of the payload of the provided entry. +func payloadSize(e pb.Entry) entryPayloadSize { + return entryPayloadSize(len(e.Data)) +} + +// payloadsSize is the size of the payloads of the provided entries. +func payloadsSize(ents []pb.Entry) entryPayloadSize { + var s entryPayloadSize + for _, e := range ents { + s += payloadSize(e) + } + return s } func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) { @@ -231,3 +318,20 @@ func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) { } l.Panic(err) } + +// extend appends vals to the given dst slice. It differs from the standard +// slice append only in the way it allocates memory. If cap(dst) is not enough +// for appending the values, precisely size len(dst)+len(vals) is allocated. +// +// Use this instead of standard append in situations when this is the last +// append to dst, so there is no sense in allocating more than needed. +func extend(dst, vals []pb.Entry) []pb.Entry { + need := len(dst) + len(vals) + if need <= cap(dst) { + return append(dst, vals...) 
// does not allocate + } + buf := make([]pb.Entry, need, need) // allocates precisely what's needed + copy(buf, dst) + copy(buf[len(dst):], vals) + return buf +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index ab091cf6ade33..18436eaedffd8 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -42,6 +42,8 @@ type config struct { TracerProvider trace.TracerProvider MeterProvider metric.MeterProvider SpanStartOptions []trace.SpanStartOption + SpanAttributes []attribute.KeyValue + MetricAttributes []attribute.KeyValue ReceivedEvent bool SentEvent bool @@ -257,3 +259,29 @@ func (o spanStartOption) apply(c *config) { func WithSpanOptions(opts ...trace.SpanStartOption) Option { return spanStartOption{opts} } + +type spanAttributesOption struct{ a []attribute.KeyValue } + +func (o spanAttributesOption) apply(c *config) { + if o.a != nil { + c.SpanAttributes = o.a + } +} + +// WithSpanAttributes returns an Option to add custom attributes to the spans. +func WithSpanAttributes(a ...attribute.KeyValue) Option { + return spanAttributesOption{a: a} +} + +type metricAttributesOption struct{ a []attribute.KeyValue } + +func (o metricAttributesOption) apply(c *config) { + if o.a != nil { + c.MetricAttributes = o.a + } +} + +// WithMetricAttributes returns an Option to add custom attributes to the metrics. +func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 201867a86944a..fbcbfb84e047e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -62,11 +62,11 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { @@ -102,11 +102,11 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index a15d06cb0c451..439251e13476c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package 
otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.53.0" + return "0.56.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664beb5c..ae8577ef366aa 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6d9c8b64958b3..dbfb2a165a069 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,6 +9,8 @@ linters: disable-all: true # Specifically enable linters we want to use. enable: + - asasalint + - bodyclose - depguard - errcheck - errorlint @@ -23,6 +25,7 @@ linters: - revive - staticcheck - tenv + - testifylint - typecheck - unconvert - unused @@ -62,12 +65,12 @@ issues: - path: _test\.go linters: - gosec - # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" linters: - gosec - # Igonoring gosec G402: TLS MinVersion too low + # Ignoring gosec G402: TLS MinVersion too low # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - text: "G402: TLS MinVersion too low." linters: @@ -124,8 +127,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" @@ -300,3 +301,9 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c01e6998e0b3c..8f68dbd04aefb 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,158 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. 
(#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. 
(#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + +## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862) +- Add `WithExportBufferSize` option to log batch processor.(#5877) + +### Changed + +- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778) +- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791) +- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) +- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) +- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) +- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) +- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) + +### Deprecated + +- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) + +### Fixed + +- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819) +- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) + +## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 + +### Added + +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739) +- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773) +- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773) +- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) + +### Fixed + +- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) +- Fix panic on instruments creation when setting meter provider. (#5758) +- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) + +### Removed + +- Drop support for [Go 1.21]. 
(#5736, #5740, #5800) + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. 
(#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + ## [1.28.0/0.50.0/0.4.0] 2024-07-02 ### Added @@ -49,6 +201,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix stale timestamps reported by the last-value aggregation. (#5517) - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) ## [1.27.0/0.49.0/0.3.0] 2024-05-21 @@ -175,7 +328,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- ARM64 platform to the compatibility testing suite. (#4994) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -1836,7 +1989,7 @@ with major version 0. - Setting error status while recording error with Span from oteltest package. (#1729) - The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. 
If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) @@ -2410,7 +2563,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-colector example to use the v0.5.0 collector. (#915) +- Update otel-collector example to use the v0.5.0 collector. (#915) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. @@ -3003,7 +3156,11 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 +[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 +[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 @@ -3086,6 +3243,9 @@ It contains api and sdk for trace and meter. 
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 2025549332305..945a07d2b072b 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,13 +5,13 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index b86572f58ea5a..22a2e9dbd4952 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -578,7 +578,10 @@ See also: The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the -absence of race conditions. +absence of race conditions. The top-level tests with this term will be run +many times in the `test-concurrent-safe` CI job to increase the chance of +catching concurrency issues. This does not apply to subtests when this term +is not in their root name. ### Internal packages @@ -626,13 +629,14 @@ should be canceled. ## Approvers and Maintainers -### Approvers +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent -- [Chester Cheung](https://github.com/hanyuancheung), Tencent +### Approvers ### Maintainers -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Damien Mathieu](https://github.com/dmathieu), Elastic - [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk @@ -641,16 +645,18 @@ should be canceled. ### Emeritus -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Evan Torrie](https://github.com/evantorrie), Yahoo +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). 
[Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index f33619f76a2b9..b8292a4fb910f 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -145,12 +142,14 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 $(TEST_TARGETS): test test: $(OTEL_GO_MOD_DIRS:%=test/%) test/%: DIR=$* @@ -178,17 +177,14 @@ test-coverage: $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix @@ -264,7 +260,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5a8909317312c..efec278905bb8 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -47,20 +47,22 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. -| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -87,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. ### Export diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 940f57f3d87d1..ffa9b61258ab3 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[<new-version>] - <date>`). + - Make sure the new section is under the comment for released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. @@ -110,17 +111,6 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `<new tag>` on GitHub. The release body should include all the release notes from the Changelog for this release. -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy.
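The rewritten `benchmark` target earlier in this patch now covers every module directory instead of a hand-picked filter: `go list ./...` enumerates packages, `grep -v third_party` drops vendored helpers, and `-run=xxxxxMatchNothingxxxxx` is a pattern that matches no `Test*` function, so only `Benchmark*` functions run. A sketch of a benchmark such an invocation would pick up (hypothetical name and body):

```go
package example_test // hypothetical package

import "testing"

// Run with: go test -run=xxxxxMatchNothingxxxxx -bench=.
// The -run pattern filters out every unit test, leaving only benchmarks.
func BenchmarkAppend(b *testing.B) {
	var s []int
	for i := 0; i < b.N; i++ {
		s = append(s, i)
	}
	_ = s // Keep the result alive so the loop is not optimized away.
}
```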
- ## Post-Release ### Contrib Repository diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index bff9c7fdbb969..6cbefceadfe0c 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [1]KeyValue(kvs) case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [2]KeyValue(kvs) case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [3]KeyValue(kvs) case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [4]KeyValue(kvs) case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [5]KeyValue(kvs) case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [6]KeyValue(kvs) case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [7]KeyValue(kvs) case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [8]KeyValue(kvs) case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [9]KeyValue(kvs) case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [10]KeyValue(kvs) default: return nil } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index c40c896cc6673..36f5367030c9e 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -44,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. // +// The passed key must be a valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property keys. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys. func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) { // Notice: Consider using [NewKeyValuePropertyRaw] instead // that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be a valid, non-empty UTF-8 string. +// The passed value must be a valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property keys.
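The `computeDistinctFixed` rewrite above replaces the `new`/`copy` pattern with Go 1.20's slice-to-array conversion: `[N]T(s)` copies the first N elements of `s` into a fresh array value, and panics if the slice is shorter than N. A standalone illustration of those semantics, with made-up data:

```go
package main

import "fmt"

func main() {
	kvs := []string{"a", "b", "c"}

	// Go 1.20+: converting a slice to an array copies len(array)
	// elements into an independent array value.
	arr := [3]string(kvs)

	kvs[0] = "mutated"
	fmt.Println(arr[0]) // Prints "a": the array does not alias the slice.

	// Converting a slice shorter than the array length panics at runtime:
	// _ = [4]string(kvs) // panics: slice length 3 < array length 4
}
```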
+// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys. func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -115,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It returns an empty string if the key is invalid under the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -203,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignore empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -230,6 +265,10 @@ type Member struct { // Notice: Consider using [NewMemberRaw] instead // that does not require percent-encoding of the value. func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) { // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be a valid, non-empty UTF-8 string. +// The passed value must be a valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage keys. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended to be used as baggage keys.
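The relaxed contract above means `NewMemberRaw` (whose definition follows) only requires UTF-8 keys and values, while `String` still enforces W3C token keys at encode time and percent-encodes the value. A hedged usage sketch relying only on the exported signatures visible in this diff (`NewMemberRaw`, `New`, `String`); the key and value are made up:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMemberRaw takes an unescaped UTF-8 value; no manual
	// percent-encoding is needed (unlike NewMember).
	m, err := baggage.NewMemberRaw("userName", "Alice Smith")
	if err != nil {
		panic(err)
	}

	b, err := baggage.New(m)
	if err != nil {
		panic(err)
	}

	// String percent-encodes the value for the W3C baggage header,
	// e.g. "userName=Alice%20Smith".
	fmt.Println(b.String())
}
```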
func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } // Decode a percent-encoded value. - value, err := url.PathUnescape(val) + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(cap) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { @@ -314,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It returns an empty string if the key is invalid under the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { s += propertyDelimiter + m.properties.String() @@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. +// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -469,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is // replaced.
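The `r == utf8.RuneError && size == 1` check in `replaceInvalidUTF8Sequences` above is the standard idiom for telling a malformed byte apart from a legitimate U+FFFD already present in the input: `DecodeRuneInString` reports `(RuneError, 1)` only for invalid input, while a genuine '�' decodes with size 3. A small demonstration of that contract:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// "\xff" is not valid UTF-8; "�" is the valid three-byte
	// encoding of U+FFFD (which equals utf8.RuneError).
	for _, s := range []string{"\xff", "�"} {
		r, size := utf8.DecodeRuneInString(s)
		fmt.Printf("%q -> rune=%q size=%d invalidByte=%t\n",
			s, r, size, r == utf8.RuneError && size == 1)
	}
}
```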
// // If member is invalid according to the W3C Baggage specification, an error @@ -528,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It ignores members whose key is invalid under the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignore empty members. + if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) { } // Decode a percent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ '~': true, } +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// A Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// A Baggage value is a valid UTF-8 string. +// An empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool { return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index df29d96a6daea..2acbac3546651 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 checked above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 441c595014d35..921f85961ad41 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage.
*/ diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index f6dd3decc9002..2e7690e43a24a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco return nil } return &commonpb.InstrumentationScope{ - Name: il.Name, - Version: il.Version, + Name: il.Name, + Version: il.Version, + Attributes: Iterator(il.Attributes.Iter()), } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index c3c69c5a0d635..bf27ef0220e7d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -4,6 +4,8 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( + "math" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -95,16 +97,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { SpanId: sid[:], TraceState: sd.SpanContext().TraceState().String(), Status: status(sd.Status().Code, sd.Status().Description), - StartTimeUnixNano: uint64(sd.StartTime().UnixNano()), - EndTimeUnixNano: uint64(sd.EndTime().UnixNano()), + StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked. + EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked. Links: links(sd.Links()), Kind: spanKind(sd.SpanKind()), Name: sd.Name(), Attributes: KeyValues(sd.Attributes()), Events: spanEvents(sd.Events()), - DroppedAttributesCount: uint32(sd.DroppedAttributes()), - DroppedEventsCount: uint32(sd.DroppedEvents()), - DroppedLinksCount: uint32(sd.DroppedLinks()), + DroppedAttributesCount: clampUint32(sd.DroppedAttributes()), + DroppedEventsCount: clampUint32(sd.DroppedEvents()), + DroppedLinksCount: clampUint32(sd.DroppedLinks()), } if psid := sd.Parent().SpanID(); psid.IsValid() { @@ -115,6 +117,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { return s } +func clampUint32(v int) uint32 { + if v < 0 { + return 0 + } + if int64(v) > math.MaxUint32 { + return math.MaxUint32 + } + return uint32(v) // nolint: gosec // Overflow/Underflow checked. +} + // status transform a span code and message into an OTLP span status. func status(status codes.Code, message string) *tracepb.Status { var c tracepb.Status_StatusCode @@ -153,7 +165,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { TraceId: tid[:], SpanId: sid[:], Attributes: KeyValues(otLink.Attributes), - DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), + DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount), Flags: flags, }) } @@ -166,7 +178,7 @@ func buildSpanFlags(sc trace.SpanContext) uint32 { flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK } - return uint32(flags) + return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative } // spanEvents transforms span Events to an OTLP span events. 
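The `uint64(max(0, ...))` and `clampUint32` guards introduced in this transform exist because `UnixNano` is negative for the zero `time.Time` (and any pre-epoch timestamp), and a bare conversion to `uint64` would wrap it into an enormous bogus value. A quick sketch of the failure mode, assuming Go 1.21+ for the `max` built-in:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var zero time.Time
	ns := zero.UnixNano() // Negative: the zero time predates the Unix epoch.

	fmt.Println(ns)                 // A large negative number.
	fmt.Println(uint64(ns))         // Wraps around to a huge bogus timestamp.
	fmt.Println(uint64(max(0, ns))) // 0, which is what the transform now emits.
}
```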
@@ -180,9 +192,9 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { for i := 0; i < len(es); i++ { events[i] = &tracepb.Span_Event{ Name: es[i].Name, - TimeUnixNano: uint64(es[i].Time.UnixNano()), + TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. Attributes: KeyValues(es[i].Attributes), - DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), + DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount), } } return events diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 3993df927defa..2171bee3c8435 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context } if c.metadata.Len() > 0 { - ctx = metadata.NewOutgoingContext(ctx, c.metadata) + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) } // Unify the client stopCtx with the parent. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go index e783b57ac4bba..b7bd429ffdff8 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go @@ -12,9 +12,8 @@ The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. -The value must contain a host. -The value may additionally a port, a scheme, and a path. -The value accepts "http" and "https" scheme. +The value must contain a scheme ("http" or "https") and a host. +The value may additionally contain a port and a path. The value should not contain a query string or fragment. OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 9513c0a57ca66..4abf48d1f622d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '='"), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key.
+ if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 8f84a79963243..3ee452ef72b23 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 4f2113ae2cf99..1c5450ab62d93 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -112,7 +112,7 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { - return fmt.Errorf("%w: %s", ctxErr, err) + return fmt.Errorf("%w: %w", ctxErr, err) } } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index bbad0e6d01e09..00ab1f20c6d4c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -59,8 +59,9 @@ func WithInsecure() Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. +// value will be used. 
If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpointURL are used, the last used option will // take precedence. @@ -79,8 +80,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 14ad8c33b48de..c76bedfb167f7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.28.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 3a0cc42f6a47f..ae92a42516666 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. 
type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index cfd1df9bfa288..a6acd8dca66ea 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,8 +5,9 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" + "reflect" "sync" - "sync/atomic" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" @@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -76,7 +78,7 @@ return val } - t := &meter{name: name, opts: opts} + t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} p.meters[key] = t return t } @@ -92,17 +94,29 @@ type meter struct { opts []metric.MeterOption mtx sync.Mutex - instruments []delegatedInstrument + instruments map[instID]delegatedInstrument registry list.List - delegate atomic.Value // metric.Meter + delegate metric.Meter } type delegatedInstrument interface { setDelegate(metric.Meter) } +// instID holds the identifying properties of an instrument. +type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream.
+ unit string +} + // setDelegate configures m to delegate all Meter functionality to Meters // created by provider. // @@ -110,12 +124,12 @@ type delegatedInstrument interface { // // It is guaranteed by the caller that this happens only once. func (m *meter) setDelegate(provider metric.MeterProvider) { - meter := provider.Meter(m.name, m.opts...) - m.delegate.Store(meter) - m.mtx.Lock() defer m.mtx.Unlock() + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + for _, inst := range m.instruments { inst.setDelegate(meter) } @@ -133,169 +147,336 @@ func (m *meter) setDelegate(provider metric.MeterProvider) { } func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + + cfg := metric.NewInt64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } i := &siCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + + cfg := metric.NewInt64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } i := &siUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + + cfg := metric.NewInt64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } i := &siHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) + } + + cfg := metric.NewInt64GaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*siGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } i := &siGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } i := &aiCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } i := &aiUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + + cfg := metric.NewInt64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } i := &aiGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) + } + + cfg := metric.NewFloat64CounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*sfCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } i := &sfCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + + cfg := metric.NewFloat64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } i := &sfUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + + cfg := metric.NewFloat64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } i := &sfHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + + cfg := metric.NewFloat64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } i := &sfGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*afCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } i := &afCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } i := &afUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } i := &afGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } // RegisterCallback captures the function that will be called during Collect. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - insts = unwrapInstruments(insts) - return del.RegisterCallback(f, insts...) - } - m.mtx.Lock() defer m.mtx.Unlock() + if m.delegate != nil { + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) + } + reg := ®istration{instruments: insts, function: f} e := m.registry.PushBack(reg) reg.unreg = func() error { @@ -307,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -335,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. 
+func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) +} + +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -346,9 +575,10 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) + reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) if err != nil { GetErrorHandler().Handle(err) + return } c.unreg = reg.Unregister diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index e31f442b48f9a..ac65262c6560d 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -87,6 +87,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -102,7 +103,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct{ name, version, schema string } +type il struct { + name string + version string + schema string + attrs attribute.Set +} // tracer is a placeholder for a trace.Tracer. // diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 3e7bb3b3566cd..b2fe3e41d3b18 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -20,11 +20,13 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { - return uint64(i) + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). 
+ return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -36,9 +38,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index cf23db7780327..f8435d8f288dd 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -213,7 +213,7 @@ type Float64Observer interface { } // Float64Callback is a function registered with a Meter that makes -// observations for a Float64Observerable instrument it is registered with. +// observations for a Float64Observable instrument it is registered with. // Calls to the Float64Observer record measurement values for the // Float64Observable. // diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index c82ba5324e225..e079aaef169e8 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -212,7 +212,7 @@ type Int64Observer interface { } // Int64Callback is a function registered with a Meter that makes observations -// for an Int64Observerable instrument it is registered with. Calls to the +// for an Int64Observable instrument it is registered with. Calls to the // Int64Observer record measurement values for the Int64Observable. // // The function needs to complete in a finite amount of time and the deadline diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index ea52e402331b1..a535782e1d9a7 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) +// WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 6a7991e0151cc..14e08c24a4be6 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -52,6 +52,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational @@ -61,6 +62,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. 
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a @@ -70,6 +72,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous int64 measurements during a computational operation. @@ -78,6 +81,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -92,6 +96,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record int64 measurements once per @@ -106,6 +111,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -130,6 +136,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational @@ -139,6 +146,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a @@ -148,6 +156,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous float64 measurements during a computational operation. @@ -156,6 +165,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. 
Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 @@ -170,6 +180,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -184,6 +195,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -242,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 8c5ac55ca93b3..0a29a2f13d802 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -19,6 +19,14 @@ "matchManagers": ["gomod"], "matchDepTypes": ["indirect"], "enabled": false + }, + { + "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], + "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" } ] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go index f4d1857c4f42f..f2cdf3c6518c9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -4,5 +4,6 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Library represents the instrumentation library. -// Deprecated: please use Scope instead. +// +// Deprecated: use [Scope] instead. type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 728115045bb4e..34852a47b2195 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -3,6 +3,8 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" +import "go.opentelemetry.io/otel/attribute" + // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the @@ -12,4 +14,6 @@ type Scope struct { Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string + // Attributes of the telemetry emitted by the scope. 
+ Attributes attribute.Set } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index 95a61d61d49c3..c02aeefdde531 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" ) // ErrPartialResource is returned by a detector when complete source @@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( - r *Resource - errs detectErrs - err error + r *Resource + err error + e error ) for _, detector := range detectors { if detector == nil { continue } - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { + r, e = detector.Detect(ctx) + if e != nil { + err = errors.Join(err, e) + if !errors.Is(e, ErrPartialResource) { continue } } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) + r, e = Merge(res, r) + if e != nil { + err = errors.Join(err, e) } *res = *r } - if len(errs) == 0 { - return nil - } - if errors.Is(errs, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. - res.schemaURL = "" - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} + if err != nil { + if errors.Is(err, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] + err = fmt.Errorf("error detecting resource: %w", err) } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) + return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 6ac1cdbf7b456..cf3c88e15cd6a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,15 +20,13 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. + // resource.New() to explicitly disable them. telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. + // resource.New() to explicitly disable them. 
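The detect refactor above replaces the hand-rolled detectErrs slice, with its custom Error, Unwrap, and Is methods, with the standard library's errors.Join, which skips nil operands and keeps errors.Is matching against every joined error. A minimal standalone sketch of the same accumulation pattern, with hypothetical detector functions and a hypothetical sentinel error:

package main

import (
	"errors"
	"fmt"
)

// ErrPartial plays the role of ErrPartialResource here: a sentinel that
// marks a recoverable, partial failure. All names are made up.
var ErrPartial = errors.New("partial result")

// runAll accumulates every failure into one error. errors.Join ignores
// nil operands, so no first-iteration special case is needed.
func runAll(steps []func() error) error {
	var err error
	for _, step := range steps {
		if e := step(); e != nil {
			err = errors.Join(err, e)
		}
	}
	return err
}

func main() {
	err := runAll([]func() error{
		func() error { return nil },
		func() error { return fmt.Errorf("host detector: %w", ErrPartial) },
		func() error { return errors.New("env detector failed") },
	})
	fmt.Println(err)                        // both messages, newline separated
	fmt.Println(errors.Is(err, ErrPartial)) // true: Join preserves Is matching
}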
host struct{} stringDetector struct { diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 71386e2da4c76..3677c83d7da33 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -10,17 +10,16 @@ import ( "golang.org/x/sys/windows/registry" ) -// implements hostIDReader +// implements hostIDReader. type hostIDReaderWindows struct{} -// read reads MachineGuid from the windows registry key: -// SOFTWARE\Microsoft\Cryptography +// read reads MachineGuid from the Windows registry key: +// SOFTWARE\Microsoft\Cryptography. func (*hostIDReaderWindows) read() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, registry.QUERY_VALUE|registry.WOW64_64KEY, ) - if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go index 5e3d199d7856a..a6a5a53c0ea7a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -17,7 +17,6 @@ import ( func platformOSDescription() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 1d399a75db2fe..ccc97e1b6625f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { // // It is up to the exporter to implement any type of retry logic if a batch is failing // to be exported, since it is specific to the protocol and backend being sent to. + clear(bsp.batch) // Erase elements to let GC collect objects bsp.batch = bsp.batch[:0] if err != nil { @@ -316,7 +317,11 @@ func (bsp *batchSpanProcessor) processQueue() { bsp.batchMutex.Unlock() if shouldExport { if !bsp.timer.Stop() { - <-bsp.timer.C + // Handle both GODEBUG=asynctimerchan=[0|1] properly. + select { + case <-bsp.timer.C: + default: + } } if err := bsp.exportSpans(ctx); err != nil { otel.Handle(err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go index 821c83faa1dd3..8c308dd60a93a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -12,25 +12,26 @@ import ( // evictedQueue is a FIFO queue with a configurable capacity. type evictedQueue[T any] struct { - queue []T - capacity int - droppedCount int - logDropped func() + queue []T + capacity int + droppedCount int + logDroppedMsg string + logDroppedOnce sync.Once } func newEvictedQueueEvent(capacity int) evictedQueue[Event] { // Do not pre-allocate queue, do this lazily. return evictedQueue[Event]{ - capacity: capacity, - logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), + capacity: capacity, + logDroppedMsg: "limit reached: dropping trace trace.Event", } } func newEvictedQueueLink(capacity int) evictedQueue[Link] { // Do not pre-allocate queue, do this lazily. 
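The batch span processor hunk above hardens the timer drain: as the in-diff comment notes, it must handle both timer-channel semantics selectable with GODEBUG=asynctimerchan. Under the old semantics a fired-but-unreceived value sits buffered in the channel, while under the Go 1.23 semantics it may not, so an unconditional blocking receive could hang. A minimal sketch of the pattern in isolation, with hypothetical names:

package main

import (
	"fmt"
	"time"
)

// drainStop stops t and discards any value already sitting in its
// channel, without blocking when the channel turns out to be empty.
func drainStop(t *time.Timer) {
	if !t.Stop() {
		// Stop reported the timer already fired. Under asynctimerchan=1
		// the value may be buffered in t.C; under the newer semantics it
		// may not be. select with a default covers both cases.
		select {
		case <-t.C:
		default:
		}
	}
}

func main() {
	t := time.NewTimer(10 * time.Millisecond)
	time.Sleep(20 * time.Millisecond) // let the timer fire
	drainStop(t)                      // safe under either GODEBUG setting
	fmt.Println("timer drained and stopped")
}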
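Relatedly, the evictedQueue rewrite that continues below stores a plain message plus a sync.Once field instead of a sync.OnceFunc closure built at construction time, so no closure is allocated up front and the warning still fires at most once per queue. A minimal sketch of the once-only warning idiom, with a hypothetical bounded queue and plain fmt output standing in for the SDK's global logger:

package main

import (
	"fmt"
	"sync"
)

type boundedQueue[T any] struct {
	items          []T
	capacity       int
	dropped        int
	logDroppedMsg  string
	logDroppedOnce sync.Once
}

func (q *boundedQueue[T]) add(v T) {
	if q.capacity > 0 && len(q.items) == q.capacity {
		q.dropped++
		// Warn once per queue, no matter how many values are dropped.
		q.logDroppedOnce.Do(func() { fmt.Println("warn:", q.logDroppedMsg) })
		return
	}
	q.items = append(q.items, v)
}

func main() {
	q := boundedQueue[int]{capacity: 2, logDroppedMsg: "limit reached: dropping values"}
	for i := 0; i < 5; i++ {
		q.add(i)
	}
	fmt.Println(q.items, "dropped:", q.dropped) // [0 1] dropped: 3
}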
return evictedQueue[Link]{ - capacity: capacity, - logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), + capacity: capacity, + logDroppedMsg: "limit reached: dropping trace trace.Link", } } @@ -53,6 +54,10 @@ func (eq *evictedQueue[T]) add(value T) { eq.queue = append(eq.queue, value) } +func (eq *evictedQueue[T]) logDropped() { + eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) }) +} + // copy returns a copy of the evictedQueue. func (eq *evictedQueue[T]) copy() []T { return slices.Clone(eq.queue) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 14c2e5bebda02..185aa7c08f7c3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name = defaultTracerName } is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } t, ok := func() (trace.Tracer, bool) { @@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. - global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) } return t } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index 32f862790c7cc..d511d0f271f54 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -99,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope { // InstrumentationLibrary returns information about the instrumentation // library that created the span. -func (s snapshot) InstrumentationLibrary() instrumentation.Library { +func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility return s.instrumentationScope } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index ac90f1a260053..17f883c2c86fe 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -62,7 +62,7 @@ type ReadOnlySpan interface { // InstrumentationLibrary returns information about the instrumentation // library that created the span. // Deprecated: please use InstrumentationScope instead. - InstrumentationLibrary() instrumentation.Library + InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be defined for backwards compatibility // Resource returns information about the entity that produced the span. Resource() *resource.Resource // DroppedAttributes returns the number of attributes dropped by the span @@ -174,6 +174,17 @@ func (s *recordingSpan) IsRecording() bool { s.mu.Lock() defer s.mu.Unlock() + return s.isRecording() +} + +// isRecording returns if this span is being recorded.
If this span has ended +// this will return false. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) isRecording() bool { + if s == nil { + return false + } return s.endTime.IsZero() } @@ -182,11 +193,15 @@ func (s *recordingSpan) IsRecording() bool { // included in the set status when the code is for an error. If this span is // not being recorded then this method does nothing. func (s *recordingSpan) SetStatus(code codes.Code, description string) { - if !s.IsRecording() { + if s == nil { return } + s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } if s.status.Code > code { return } @@ -210,12 +225,15 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) { // attributes the span is configured to have, the last added attributes will // be dropped. func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { - if !s.IsRecording() { + if s == nil || len(attributes) == 0 { return } s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } limit := s.tracer.provider.spanLimits.AttributeCountLimit if limit == 0 { @@ -233,7 +251,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { // Otherwise, add without deduplication. When attributes are read they // will be deduplicated, optimizing the operation. - s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes)) + s.attributes = slices.Grow(s.attributes, len(attributes)) for _, a := range attributes { if !a.Valid() { // Drop all invalid attributes. @@ -280,13 +298,17 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { // Do not set a capacity when creating this map. Benchmark testing has // shown this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int) - s.dedupeAttrsFromRecord(&exists) + exists := make(map[attribute.Key]int, len(s.attributes)) + s.dedupeAttrsFromRecord(exists) // Now that s.attributes is deduplicated, adding unique attributes up to // the capacity of s will not over allocate s.attributes. - sum := len(attrs) + len(s.attributes) - s.attributes = slices.Grow(s.attributes, min(sum, limit)) + + // max size = limit + maxCap := min(len(attrs)+len(s.attributes), limit) + if cap(s.attributes) < maxCap { + s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes)) + } for _, a := range attrs { if !a.Valid() { // Drop all invalid attributes. @@ -296,6 +318,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { if idx, ok := exists[a.Key]; ok { // Perform all updates before dropping, even when at capacity. + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes[idx] = a continue } @@ -386,9 +409,10 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { // the span's duration in case some operation below takes a while. et := monotonicEndTime(s.startTime) - // Do relative expensive check now that we have an end time and see if we - // need to do any more processing. - if !s.IsRecording() { + // Lock the span now that we have an end time and see if we need to do any more processing. + s.mu.Lock() + if !s.isRecording() { + s.mu.Unlock() return } @@ -413,10 +437,11 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } if s.executionTracerTaskEnd != nil { + s.mu.Unlock() s.executionTracerTaskEnd() + s.mu.Lock() } - s.mu.Lock() // Setting endTime to non-zero marks the span as ended and not recording.
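A pattern worth naming in the span.go hunks above: each exported mutator now takes s.mu itself and then consults an unexported isRecording helper whose contract is "assumes s.mu.Lock is held by the caller", instead of calling the self-locking IsRecording and locking a second time. That collapses a check-then-act sequence spanning two critical sections into one. A minimal sketch of the idiom under that reading, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type span struct {
	mu    sync.Mutex
	ended bool
	name  string
}

// IsRecording is the exported, self-locking form.
func (s *span) IsRecording() bool {
	if s == nil {
		return false
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.isRecording()
}

// isRecording assumes s.mu is held by the caller, so callers can test
// and mutate inside a single critical section instead of two.
func (s *span) isRecording() bool { return !s.ended }

func (s *span) SetName(name string) {
	if s == nil {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.isRecording() { // checked under the same lock as the write
		return
	}
	s.name = name
}

func main() {
	s := &span{}
	s.SetName("op")
	fmt.Println(s.name, s.IsRecording()) // op true
}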
if config.Timestamp().IsZero() { s.endTime = et @@ -450,7 +475,13 @@ func monotonicEndTime(start time.Time) time.Time { // does not change the Span status. If this span is not being recorded or err is nil // then this method does nothing. func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { - if s == nil || err == nil || !s.IsRecording() { + if s == nil || err == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { return } @@ -486,14 +517,23 @@ func recordStackTrace() string { } // AddEvent adds an event with the provided name and options. If this span is -// not being recorded than this method does nothing. +// not being recorded then this method does nothing. func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { - if !s.IsRecording() { + if s == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { return } s.addEvent(name, o...) } +// addEvent adds an event with the provided name and options. +// +// This method assumes s.mu.Lock is held by the caller. func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { c := trace.NewEventConfig(o...) e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} @@ -510,20 +550,21 @@ e.Attributes = e.Attributes[:limit] } - s.mu.Lock() s.events.add(e) - s.mu.Unlock() } // SetName sets the name of this span. If this span is not being recorded then // this method does nothing. func (s *recordingSpan) SetName(name string) { - if !s.IsRecording() { + if s == nil { return } s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } s.name = name } @@ -579,29 +620,26 @@ func (s *recordingSpan) Attributes() []attribute.KeyValue { func (s *recordingSpan) dedupeAttrs() { // Do not set a capacity when creating this map. Benchmark testing has // shown this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int) - s.dedupeAttrsFromRecord(&exists) + exists := make(map[attribute.Key]int, len(s.attributes)) + s.dedupeAttrsFromRecord(exists) } // dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity // using record as the record of unique attribute keys to their index. // // This method assumes s.mu.Lock is held by the caller. -func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) { +func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) { // Use the fact that slices share the same backing array. unique := s.attributes[:0] for _, a := range s.attributes { - if idx, ok := (*record)[a.Key]; ok { + if idx, ok := record[a.Key]; ok { unique[idx] = a } else { unique = append(unique, a) - (*record)[a.Key] = len(unique) - 1 + record[a.Key] = len(unique) - 1 } } - // s.attributes have element types of attribute.KeyValue. These types are - // not pointers and they themselves do not contain pointer fields, - // therefore the duplicate values do not need to be zeroed for them to be - // garbage collected. + clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects. s.attributes = unique } @@ -642,7 +680,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { // InstrumentationLibrary returns the instrumentation.Library associated with // the Tracer that created this span.
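Two details in the dedupeAttrsFromRecord hunk above: the record map is now passed directly rather than through a pointer (a Go map is already a reference type, so the indirection bought nothing) and is pre-sized, and the tail abandoned by the in-place compaction is erased with clear so values referenced only from the duplicates become collectible. A minimal sketch of in-place, order-preserving deduplication over a shared backing array, with hypothetical types (clear on slices needs Go 1.21+):

package main

import "fmt"

type kv struct {
	Key, Value string
}

// dedupe compacts kvs in place, keeping the last value seen for each
// key at the position where the key first appeared.
func dedupe(kvs []kv) []kv {
	seen := make(map[string]int, len(kvs)) // pre-sized: every key may be unique
	unique := kvs[:0]                      // shares kvs' backing array
	for _, a := range kvs {
		if i, ok := seen[a.Key]; ok {
			unique[i] = a // later duplicate wins, position preserved
		} else {
			seen[a.Key] = len(unique)
			unique = append(unique, a)
		}
	}
	// Zero the abandoned tail so anything referenced only from there
	// can be garbage collected.
	clear(kvs[len(unique):])
	return unique
}

func main() {
	out := dedupe([]kv{{"a", "1"}, {"b", "2"}, {"a", "3"}})
	fmt.Println(out) // [{a 3} {b 2}]
}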
-func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { +func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility s.mu.Lock() defer s.mu.Unlock() return s.tracer.instrumentationScope @@ -657,7 +695,7 @@ func (s *recordingSpan) Resource() *resource.Resource { } func (s *recordingSpan) AddLink(link trace.Link) { - if !s.IsRecording() { + if s == nil { return } if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && @@ -665,6 +703,12 @@ return } + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { + return + } + l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} // Discard attributes over limit. @@ -678,9 +722,7 @@ l.Attributes = l.Attributes[:limit] } - s.mu.Lock() s.links.add(l) - s.mu.Unlock() } // DroppedAttributes returns the number of attributes dropped by the span @@ -755,12 +797,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan { } func (s *recordingSpan) addChild() { - if !s.IsRecording() { + if s == nil { return } + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { + return + } s.childSpanCount++ - s.mu.Unlock() } func (*recordingSpan) private() {} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go index 0a641f94889bb..cd2cc30ca2d67 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go @@ -45,22 +45,25 @@ func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan { // SpanStub is a stand-in for a Span. type SpanStub struct { - Name string - SpanContext trace.SpanContext - Parent trace.SpanContext - SpanKind trace.SpanKind - StartTime time.Time - EndTime time.Time - Attributes []attribute.KeyValue - Events []tracesdk.Event - Links []tracesdk.Link - Status tracesdk.Status - DroppedAttributes int - DroppedEvents int - DroppedLinks int - ChildSpanCount int - Resource *resource.Resource - InstrumentationLibrary instrumentation.Library + Name string + SpanContext trace.SpanContext + Parent trace.SpanContext + SpanKind trace.SpanKind + StartTime time.Time + EndTime time.Time + Attributes []attribute.KeyValue + Events []tracesdk.Event + Links []tracesdk.Link + Status tracesdk.Status + DroppedAttributes int + DroppedEvents int + DroppedLinks int + ChildSpanCount int + Resource *resource.Resource + InstrumentationScope instrumentation.Scope + + // Deprecated: use InstrumentationScope instead. + InstrumentationLibrary instrumentation.Library //nolint:staticcheck // This field needs to be defined for backwards compatibility } // SpanStubFromReadOnlySpan returns a SpanStub populated from ro. @@ -85,12 +88,18 @@ func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub { DroppedLinks: ro.DroppedLinks(), ChildSpanCount: ro.ChildSpanCount(), Resource: ro.Resource(), + InstrumentationScope: ro.InstrumentationScope(), InstrumentationLibrary: ro.InstrumentationScope(), } } // Snapshot returns a read-only copy of the SpanStub.
func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan { + scopeOrLibrary := s.InstrumentationScope + if scopeOrLibrary.Name == "" && scopeOrLibrary.Version == "" && scopeOrLibrary.SchemaURL == "" { + scopeOrLibrary = s.InstrumentationLibrary + } + return spanSnapshot{ name: s.Name, spanContext: s.SpanContext, @@ -107,7 +116,7 @@ droppedLinks: s.DroppedLinks, childSpanCount: s.ChildSpanCount, resource: s.Resource, - instrumentationScope: s.InstrumentationLibrary, + instrumentationScope: scopeOrLibrary, } } @@ -152,6 +161,6 @@ func (s spanSnapshot) InstrumentationScope() instrumentation.Scope { return s.instrumentationScope } -func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { +func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility return s.instrumentationScope } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 33d065a7cb991..0b214d3fe9f45 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.28.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go index ada857995dbb4..d5197e16ceddd 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -115,7 +115,7 @@ func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { name = hostPart } if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) + port = int(parsedPort) // nolint: gosec // Bit size of 16 checked above. } return } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go index aa9e1017156ab..1a820bdb30349 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go @@ -309,5 +309,5 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bit size of 16 checked above. } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/README.md deleted file mode 100644 index cfbc9055b32f7..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.4.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.4.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.4.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go deleted file mode 100644 index d83a66b9b473d..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things.
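The SpanStub.Snapshot change above migrates to InstrumentationScope while staying compatible with callers that still populate only the deprecated InstrumentationLibrary field: the new field wins unless all of its fields are zero. A minimal sketch of that fallback idiom, with hypothetical types standing in for the SDK's:

package main

import "fmt"

type Scope struct{ Name, Version, SchemaURL string }

type Stub struct {
	Scope Scope
	// Deprecated: kept only so old callers keep working.
	Library Scope
}

// effectiveScope prefers the new field and falls back to the deprecated
// one only when the new field was never set.
func (s Stub) effectiveScope() Scope {
	if s.Scope == (Scope{}) {
		return s.Library
	}
	return s.Scope
}

func main() {
	oldCaller := Stub{Library: Scope{Name: "legacy"}}
	newCaller := Stub{Scope: Scope{Name: "modern"}}
	fmt.Println(oldCaller.effectiveScope().Name) // legacy
	fmt.Println(newCaller.effectiveScope().Name) // modern
}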
This package represents the conventions -// as of the v1.4.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go deleted file mode 100644 index 71a2ece3d376a..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go deleted file mode 100644 index f0c023cafbbc5..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -import ( - "net/http" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/semconv/internal" - "go.opentelemetry.io/otel/trace" -) - -// HTTP scheme attributes. -var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) - -var sc = &internal.SemanticConventions{ - EnduserIDKey: EnduserIDKey, - HTTPClientIPKey: HTTPClientIPKey, - HTTPFlavorKey: HTTPFlavorKey, - HTTPHostKey: HTTPHostKey, - HTTPMethodKey: HTTPMethodKey, - HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, - HTTPRouteKey: HTTPRouteKey, - HTTPSchemeHTTP: HTTPSchemeHTTP, - HTTPSchemeHTTPS: HTTPSchemeHTTPS, - HTTPServerNameKey: HTTPServerNameKey, - HTTPStatusCodeKey: HTTPStatusCodeKey, - HTTPTargetKey: HTTPTargetKey, - HTTPURLKey: HTTPURLKey, - HTTPUserAgentKey: HTTPUserAgentKey, - NetHostIPKey: NetHostIPKey, - NetHostNameKey: NetHostNameKey, - NetHostPortKey: NetHostPortKey, - NetPeerIPKey: NetPeerIPKey, - NetPeerNameKey: NetPeerNameKey, - NetPeerPortKey: NetPeerPortKey, - NetTransportIP: NetTransportIP, - NetTransportOther: NetTransportOther, - NetTransportTCP: NetTransportTCP, - NetTransportUDP: NetTransportUDP, - NetTransportUnix: NetTransportUnix, -} - -// NetAttributesFromHTTPRequest generates attributes of the net -// namespace as specified by the OpenTelemetry specification for a -// span. The network parameter is a string that net.Dial function -// from standard library can understand. -func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - return sc.NetAttributesFromHTTPRequest(network, request) -} - -// EndUserAttributesFromHTTPRequest generates attributes of the -// enduser namespace as specified by the OpenTelemetry specification -// for a span. -func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - return sc.EndUserAttributesFromHTTPRequest(request) -} - -// HTTPClientAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the client side. 
-func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - return sc.HTTPClientAttributesFromHTTPRequest(request) -} - -// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes -// to be used with server-side HTTP metrics. -func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) -} - -// HTTPServerAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the server side. Currently, only basic authentication is -// supported. -func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) -} - -// HTTPAttributesFromHTTPStatusCode generates attributes of the http -// namespace as specified by the OpenTelemetry specification for a -// span. -func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - return sc.HTTPAttributesFromHTTPStatusCode(code) -} - -// SpanStatusFromHTTPStatusCode generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - return internal.SpanStatusFromHTTPStatusCode(code) -} - -// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -// Exclude 4xx for SERVER to set the appropriate status. -func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go deleted file mode 100644 index 66c340c121380..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go +++ /dev/null @@ -1,895 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -import "go.opentelemetry.io/otel/attribute" - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // Name of the cloud provider. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'gcp' - CloudProviderKey = attribute.Key("cloud.provider") - // The cloud account ID the resource is assigned to. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - // The geographical region the resource is running. Refer to your provider's docs - // to see the available regions, for example [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure regions](https://azure.microsoft.com/en-us/global- - // infrastructure/geographies/), or [Google Cloud - // regions](https://cloud.google.com/about/locations). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - CloudRegionKey = attribute.Key("cloud.region") - // Cloud regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the resource - // is running. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Google Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - // The cloud platform in use. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'aws_ec2', 'azure_vm', 'gcp_compute_engine' - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") -) - -var ( - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") -) - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. - // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo - // perguide/clusters.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l - // aunch_types.html) for an ECS task. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'ec2', 'fargate' - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates - // t/developerguide/task_definitions.html). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - // The task definition family this task definition is a member of. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - // The revision for this task definition. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // The ARN of an EKS cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// Resources specific to Amazon Web Services. -const ( - // The name(s) of the AWS log group(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each write - // to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - // The Amazon Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - // The name(s) of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - // The ARN(s) of the AWS log stream(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- - // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain - // several log streams, so these ARNs necessarily identify both a log group and a - // log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// A container instance. -const ( - // Container name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - // Container ID. Usually a UUID, as for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container- - // identification). The UUID might be abbreviated. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - // The container runtime managing this container. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - // Name of the image the container was built on. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - // Container image tag. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// The software deployment. -const ( - // Name of the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// The device on which the process represented by this resource is running. -const ( - // A unique identifier representing the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values outlined - // below. This value is not an advertising identifier and MUST NOT be used as - // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id - // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden - // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the - // Firebase Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on best - // practices and exact implementation details. Caution should be taken when - // storing personal data or anything which can identify a user. GDPR and data - // protection laws may apply, ensure you do your own due diligence. - DeviceIDKey = attribute.Key("device.id") - // The model identifier for the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version of the - // model identifier rather than the market or consumer-friendly name of the - // device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - // The marketing name for the device model - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of the - // device model rather than a machine readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// A serverless instance. -const ( - // The name of the function being executed. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function' - FaaSNameKey = attribute.Key("faas.name") - // The unique ID of the function being executed. 
- // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' - // Note: For example, in AWS Lambda this field corresponds to the - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- - // namespaces.html) value, in GCP to the URI of the resource, and in Azure to the - // [FunctionDirectory](https://github.com/Azure/azure-functions- - // host/wiki/Retrieving-information-about-the-currently-running-function) field. - FaaSIDKey = attribute.Key("faas.id") - // The version string of the function being executed as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2.0.0' - FaaSVersionKey = attribute.Key("faas.version") - // The execution environment ID as a string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'my-function:instance-0001' - FaaSInstanceKey = attribute.Key("faas.instance") - // The amount of memory available to the serverless function in MiB. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 128 - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information. - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// A host is defined as a general computing instance. -const ( - // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud - // provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostIDKey = attribute.Key("host.id") - // Name of the host. On Unix systems, it may contain what the hostname command - // returns, or the fully qualified hostname, or another name specified by the - // user. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - // Type of host. For Cloud, this must be the machine type. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - // The CPU architecture the host system is running on. - // - // Type: Enum - // Required: No - // Stability: stable - HostArchKey = attribute.Key("host.arch") - // Name of the VM image or OS install the host was instantiated from. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - // VM image ID. For Cloud, this value is from the provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - // The version string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// A Kubernetes Cluster. -const ( - // The name of the cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// A Kubernetes Node object. -const ( - // The name of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - // The UID of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// A Kubernetes Namespace. -const ( - // The name of the namespace that the pod is running in. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// A Kubernetes Pod object. -const ( - // The UID of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - // The name of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // The name of the Container in a Pod template. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") -) - -// A Kubernetes ReplicaSet object. -const ( - // The UID of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicasetUIDKey = attribute.Key("k8s.replicaset.uid") - // The name of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicasetNameKey = attribute.Key("k8s.replicaset.name") -) - -// A Kubernetes Deployment object. -const ( - // The UID of the Deployment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - // The name of the Deployment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// A Kubernetes StatefulSet object. -const ( - // The UID of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulsetUIDKey = attribute.Key("k8s.statefulset.uid") - // The name of the StatefulSet. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulsetNameKey = attribute.Key("k8s.statefulset.name") -) - -// A Kubernetes DaemonSet object. -const ( - // The UID of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonsetUIDKey = attribute.Key("k8s.daemonset.uid") - // The name of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonsetNameKey = attribute.Key("k8s.daemonset.name") -) - -// A Kubernetes Job object. -const ( - // The UID of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - // The name of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// A Kubernetes CronJob object. -const ( - // The UID of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - // The name of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// The operating system (OS) on which the process represented by this resource is running. -const ( - // The operating system type. - // - // Type: Enum - // Required: Always - // Stability: stable - OSTypeKey = attribute.Key("os.type") - // Human readable (not intended to be parsed) OS version information, like e.g. - // reported by `ver` or `lsb_release -a` commands. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' - OSDescriptionKey = attribute.Key("os.description") - // Human readable operating system name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - // The version string of the operating system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// An operating system process. -const ( - // Process identifier (PID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - // The name of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. 
On Windows, can be set to the base name of - // `GetProcessImageFileNameW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - // The full path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - // The command used to launch the process (i.e. the command name). On Linux based - // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, - // can be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - // The full command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not - // set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - // All the command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited strings - // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be - // the full argv vector passed to `main`. - // - // Type: string[] - // Required: See below - // Stability: stable - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - // The username of the user that owns the process. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") -) - -// The single (language) runtime instance which is monitored. -const ( - // The name of the runtime of this process. For compiled native binaries, this - // SHOULD be the name of the compiler. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - // The version of the runtime of this process, as returned by the runtime without - // modification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - // An additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// A service instance. -const ( - // Logical name of the service. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled services. 
If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, the - // value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - // A namespace for `service.name`. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace defined - // (so the empty/unspecified namespace is simply one more valid namespace). Zero- - // length namespace string is assumed equal to unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - // The string ID of the service instance. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to distinguish instances of the same service that exist - // at the same time (e.g. instances of a horizontally scaled service). It is - // preferable for the ID to be persistent and stay the same for the lifetime of - // the service instance, however it is acceptable that the ID is ephemeral and - // changes during important lifetime events for the service (e.g. service - // restarts). If the service has no inherent unique ID that can be used as the - // value of this attribute it is recommended to generate a random Version 1 or - // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - // The version string of the service API or implementation. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// The telemetry SDK used to capture data recorded by the instrumentation libraries. -const ( - // The name of the telemetry SDK as defined above. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - // The language of the telemetry SDK. - // - // Type: Enum - // Required: No - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - // The version string of the telemetry SDK. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - // The version string of the auto instrumentation agent, if used. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. -const ( - // The name of the web engine. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - // The version of the web engine. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - // Additional description of the web engine (e.g. detailed version and edition - // information). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go deleted file mode 100644 index b9457bc0b831f..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.4.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go deleted file mode 100644 index 006482a30782a..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go +++ /dev/null @@ -1,1367 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" - -import "go.opentelemetry.io/otel/attribute" - -// This document defines the attributes used to perform database client calls. -const ( - // An identifier for the database management system (DBMS) product being used. See - // below for a list of well-known identifiers. - // - // Type: Enum - // Required: Always - // Stability: stable - DBSystemKey = attribute.Key("db.system") - // The connection string used to connect to the database. It is recommended to - // remove embedded credentials. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - // Username for accessing the database. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") - // The fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver - // used to connect. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - // If no [tech-specific attribute](#call-level-attributes-for-specific- - // technologies) is defined, this attribute is used to report the name of the - // database being accessed. For commands that switch the database, this should be - // set to the target database (even if the command fails). - // - // Type: string - // Required: Required, if applicable and no more-specific attribute is defined. - // Stability: stable - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called "schema - // name". - DBNameKey = attribute.Key("db.name") - // The database statement being executed. - // - // Type: string - // Required: Required if applicable and not explicitly disabled via - // instrumentation configuration. - // Stability: stable - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - // Note: The value may be sanitized to exclude sensitive information. - DBStatementKey = attribute.Key("db.statement") - // The name of the operation being executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // Required: Required, if `db.statement` is not applicable. - // Stability: stable - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to attempt any - // client-side parsing of `db.statement` just to get this property, but it should - // be set if the operation name is provided by the library being instrumented. If - // the SQL statement has an ambiguous operation, or performs more than one - // operation, this value may be omitted. - DBOperationKey = attribute.Key("db.operation") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") -) - -// Connection-level attributes for Microsoft SQL Server -const ( - // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- - // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named instance. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") -) - -// Call-level attributes for Cassandra -const ( - // The name of the keyspace being accessed. To be used instead of the generic - // `db.name` attribute. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'mykeyspace' - DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") - // The fetch size used for paging, i.e. how many rows will be returned at once. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - // The consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra- - // oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // Required: No - // Stability: stable - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - // The name of the primary table that the operation is acting upon, including the - // schema name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra rather - // than sql. It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - // Whether or not the query is idempotent. - // - // Type: boolean - // Required: No - // Stability: stable - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - // The number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - // The ID of the coordinating node for a query. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - // The data center of the coordinating node for a query. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// Call-level attributes for Apache HBase -const ( - // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being - // accessed. To be used instead of the generic `db.name` attribute. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'default' - DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace") -) - -// Call-level attributes for Redis -const ( - // The index of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To be used - // instead of the generic `db.name` attribute. - // - // Type: int - // Required: Required, if other than the default database (`0`). - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// Call-level attributes for MongoDB -const ( - // The collection being accessed within the database stated in `db.name`. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// Call-level attrbiutes for SQL databases -const ( - // The name of the primary table that the operation is acting upon, including the - // schema name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// This document defines the attributes used to report a single exception associated with a span. -const ( - // The type of the exception (its fully-qualified class name, if applicable). The - // dynamic type of the exception should be preferred over the static type in - // languages that support it. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") - // The exception message. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - // A stacktrace as a string in the natural representation for the language - // runtime. The representation is to be determined and documented by each language - // SIG. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - // SHOULD be set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // Required: No - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of a span, - // if that span is ended while the exception is still logically "in flight". - // This may be actually "in flight" in some languages (e.g. if the exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most languages. - - // It is usually not possible to determine at the point where an exception is - // thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending the span, - // as done in the [example above](#exception-end-example). - - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. -const ( - // Type of the trigger on which the function is executed. - // - // Type: Enum - // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. - // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing - // invocations, if it is known to the client. This is, for example, not the case, - // when the transport layer is abstracted in a FaaS client framework without - // access to its configuration. - // Stability: stable - FaaSTriggerKey = attribute.Key("faas.trigger") - // The execution ID of the current function execution. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSExecutionKey = attribute.Key("faas.execution") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. -const ( - // The name of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos - // DB to the database name. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - // Describes the type of the operation that was performed on the data. - // - // Type: Enum - // Required: Always - // Stability: stable - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - // A string containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - // The document name/table subjected to the operation. For example, in Cloud - // Storage or S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // A string containing the function invocation time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - // A string containing the schedule period as [Cron Expression](https://docs.oracl - // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") -) - -// Contains additional attributes for incoming FaaS spans. -const ( - // A boolean that is true if the serverless function is executed for the first - // time (aka cold-start). 
- // - // Type: boolean - // Required: No - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// Contains additional attributes for outgoing FaaS spans. -const ( - // The name of the invoked function. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - // The cloud provider of the invoked function. - // - // Type: Enum - // Required: Always - // Stability: stable - // Examples: 'aws' - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked - // function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - // The cloud region of the invoked function. - // - // Type: string - // Required: For some cloud providers, like AWS or GCP, the region in which a - // function is hosted is essential to uniquely identify the function and also part - // of its endpoint. Since it's part of the endpoint being called, the region is - // always known to clients. In these cases, `faas.invoked_region` MUST be set - // accordingly. If the region is unknown to the client or not required for - // identifying the invoked function, setting `faas.invoked_region` is optional. - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") -) - -// These attributes may be used for any network related operation. -const ( - // Transport protocol used. See note below. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'ip_tcp' - NetTransportKey = attribute.Key("net.transport") - // Remote address of the peer (dotted decimal for IPv4 or - // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) - // - // Type: string - // Required: No - // Stability: stable - // Examples: '127.0.0.1' - NetPeerIPKey = attribute.Key("net.peer.ip") - // Remote port number. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - // Remote hostname or similar, see note below. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - NetPeerNameKey = attribute.Key("net.peer.name") - // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '192.168.0.1' - NetHostIPKey = attribute.Key("net.host.ip") - // Like `net.peer.port` but for the host port. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 35555 - NetHostPortKey = attribute.Key("net.host.port") - // Local hostname or similar, see note below. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Another IP-based protocol - NetTransportIP = NetTransportKey.String("ip") - // Unix Domain socket. 
See below - NetTransportUnix = NetTransportKey.String("unix") - // Named or anonymous pipe. See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -// Operations that access some remote service. -const ( - // The [`service.name`](../../resource/semantic_conventions/README.md#service) of - // the remote service. SHOULD be equal to the actual `service.name` resource - // attribute of the remote service if any. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// These attributes may be used for any operation with an authenticated and/or authorized enduser. -const ( - // Username or client_id extracted from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the - // inbound request from outside the system. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - // Actual/assumed role the client is making the request under extracted from token - // or application security context. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - // Scopes or granted authorities the client currently possesses extracted from - // token or application security context. The value would come from the scope - // associated with an [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value - // in a [SAML 2.0 Assertion](http://docs.oasis- - // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// These attributes may be used for any operation to store information about a thread that started a span. -const ( - // Current "managed" thread ID (as opposed to OS thread ID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - // Current thread name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// These attributes allow to report this unit of code and therefore to provide more context about the span. -const ( - // The method or function name, or equivalent (usually rightmost part of the code - // unit's name). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - // The "namespace" within which `code.function` is defined. Usually the qualified - // class or module name, such that `code.namespace` + some separator + - // `code.function` form a unique identifier for the code unit. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - // The source code file name that identifies the code unit as uniquely as possible - // (preferably an absolute file path). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - // The line number in `code.filepath` best representing the operation. It SHOULD - // point within the code unit named in `code.function`. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") -) - -// This document defines semantic conventions for HTTP client and server Spans. -const ( - // HTTP request method. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. - // Usually the fragment is not transmitted over HTTP, but if it is known, it - // should be included nevertheless. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case the attribute's - // value should be `https://www.example.com/`. - HTTPURLKey = attribute.Key("http.url") - // The full request target as passed in a HTTP request line or equivalent. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/path/12314/?q=ddds#123' - HTTPTargetKey = attribute.Key("http.target") - // The value of the [HTTP host - // header](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is - // empty or not present, this attribute should be the same. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'www.example.org' - HTTPHostKey = attribute.Key("http.host") - // The URI scheme identifying the used protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // Required: If and only if one was received/sent. - // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") - // Kind of HTTP protocol used. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: '1.0' - // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` - // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. - HTTPFlavorKey = attribute.Key("http.flavor") - // Value of the [HTTP User- - // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the - // client. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - HTTPUserAgentKey = attribute.Key("http.user_agent") - // The size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - // The size of the uncompressed request payload body after transport decoding. Not - // set if transport encoding not used. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") - // The size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - // The size of the uncompressed response payload body after transport decoding. - // Not set if transport encoding not used. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") -) - -var ( - // HTTP 1.0 - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP 1.1 - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP 2 - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // SPDY protocol - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// Semantic Convention for HTTP Server -const ( - // The primary server name of the matched virtual host. This should be obtained - // via configuration. If no such configuration can be obtained, this attribute - // MUST NOT be set ( `net.host.name` should be used instead). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - // Note: `http.url` is usually not readily available on the server side but would - // have to be assembled in a cumbersome and sometimes lossy process from other - // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus - // preferred to supply the raw data that is available. - HTTPServerNameKey = attribute.Key("http.server_name") - // The matched route (path template). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/users/:userID?' - HTTPRouteKey = attribute.Key("http.route") - // The IP address of the original client behind all proxies, if known (e.g. from - // [X-Forwarded-For](https://developer.mozilla.org/en- - // US/docs/Web/HTTP/Headers/X-Forwarded-For)). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '83.164.160.102' - // Note: This is not necessarily the same as `net.peer.ip`, which would identify - // the network-level peer, which may be a proxy. - HTTPClientIPKey = attribute.Key("http.client_ip") -) - -// Attributes that exist for multiple DynamoDB request types. -const ( - // The keys in the `RequestItems` object field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - // The JSON-serialized value of each item in the `ConsumedCapacity` response - // field. 
- // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { - // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - // The JSON-serialized value of the `ItemCollectionMetrics` response field. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - // The value of the `ConsistentRead` request parameter. - // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - // The value of the `ProjectionExpression` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, - // ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - // The value of the `Limit` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - // The value of the `AttributesToGet` request parameter. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - // The value of the `IndexName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - // The value of the `Select` request parameter. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// DynamoDB.CreateTable -const ( - // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request - // field - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": - // number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// DynamoDB.ListTables -const ( - // The value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - // The number of items in the `TableNames` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// DynamoDB.Query -const ( - // The value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// DynamoDB.Scan -const ( - // The value of the `Segment` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - // The value of the `TotalSegments` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - // The value of the `Count` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - // The value of the `ScannedCount` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// DynamoDB.UpdateTable -const ( - // The JSON-serialized value of each item in the `AttributeDefinitions` request - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` - // request field. 
- // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// This document defines the attributes used in messaging systems. -const ( - // A string identifying the messaging system. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - // The message destination name. This might be equal to the span name but is - // required nevertheless. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - MessagingDestinationKey = attribute.Key("messaging.destination") - // The kind of message destination - // - // Type: Enum - // Required: Required only if the message destination is either a `queue` or - // `topic`. - // Stability: stable - MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") - // A boolean that is true if the message destination is temporary. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") - // The name of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AMQP', 'MQTT' - MessagingProtocolKey = attribute.Key("messaging.protocol") - // The version of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.9.1' - MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") - // Connection string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'tibjmsnaming://localhost:7222', - // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' - MessagingURLKey = attribute.Key("messaging.url") - // A value used by the messaging system as an identifier for the message, - // represented as a string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message_id") - // The [conversation ID](#conversations) identifying the conversation to which the - // message belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MyConversationID' - MessagingConversationIDKey = attribute.Key("messaging.conversation_id") - // The (uncompressed) size of the message payload in bytes. Also use this - // attribute if it is unknown whether the compressed or uncompressed payload size - // is reported. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") - // The compressed size of the message payload in bytes. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") -) - -var ( - // A message sent to a queue - MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") - // A message sent to a topic - MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") -) - -// Semantic convention for a consumer of messages received from a messaging system -const ( - // A string identifying the kind of message consumption as defined in the - // [Operation names](#operation-names) section above. If the operation is "send", - // this attribute MUST NOT be set, since the operation can be inferred from the - // span kind in that case. - // - // Type: Enum - // Required: No - // Stability: stable - MessagingOperationKey = attribute.Key("messaging.operation") -) - -var ( - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// Attributes for RabbitMQ -const ( - // RabbitMQ message routing key. - // - // Type: string - // Required: Unless it is empty. - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") -) - -// Attributes for Apache Kafka -const ( - // Message keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message_id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") - // Name of the Kafka Consumer Group that is handling the message. Only applies to - // consumers, not producers. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") - // Client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - // Partition the message is sent to. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2 - MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") - // A boolean that is true if the message is a tombstone. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") -) - -// This document defines semantic conventions for remote procedure calls. -const ( - // A string identifying the remoting system. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'grpc', 'java_rmi', 'wcf' - RPCSystemKey = attribute.Key("rpc.system") - // The full name of the service being called, including its package name, if - // applicable. 
- // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'myservice.EchoService' - RPCServiceKey = attribute.Key("rpc.service") - // The name of the method being called, must be equal to the $method part in the - // span name. - // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'exampleMethod' - RPCMethodKey = attribute.Key("rpc.method") -) - -// Tech-specific attributes for gRPC. -const ( - // The [numeric status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC - // request. - // - // Type: Enum - // Required: Always - // Stability: stable - // Examples: 0, 1, 16 - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). -const ( - // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC - // 1.0 does not specify this, the value can be omitted. - // - // Type: string - // Required: If missing, it is assumed to be "1.0". - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - // `method` property from request. Unlike `rpc.method`, this may not relate to the - // actual method being called. Useful for client-side traces since client does not - // know what will be called on the server. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'users.create', 'get_users' - RPCJsonrpcMethodKey = attribute.Key("rpc.jsonrpc.method") - // `id` property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be cast to - // string for simplicity. Use empty string in case of `null` value. Omit entirely - // if this is a notification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - // `error.code` property of response if it is an error response. - // - // Type: int - // Required: If missing, response is assumed to be successful. 
- // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - // `error.message` property of response if it is an error response. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 5650a174b4a0d..8c45a7107ff91 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) } -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly // as a remote SpanContext and as the current Span. The Span implementation // that wraps rsc is non-recording and performs no operations other than to // return rsc as the SpanContext from the SpanContext method. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index d661c5d100f75..cdbf41d6d7f2e 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -96,7 +96,7 @@ can embed the API interface directly. This option is not recommended. It will lead to publishing packages that contain runtime panics when users update to newer versions of -[go.opentelemetry.io/otel/trace], which may be done with a trasitive +[go.opentelemetry.io/otel/trace], which may be done with a transitive dependency. Finally, an author can embed another implementation in theirs. The embedded diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 0000000000000..ef85cb70c6d8b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. 
+ embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 0000000000000..d3aa476ee1254 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. 
+	// Adding links at span creation using WithLinks is preferred to calling AddLink
+	// later, for contexts that are available during span creation, because head
+	// sampling decisions can only consider information present during span creation.
+	AddLink(link Link)
+
+	// IsRecording returns the recording state of the Span. It will return
+	// true if the Span is active and events can be recorded.
+	IsRecording() bool
+
+	// RecordError will record err as an exception span event for this span. An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind.
SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 28877d4ab4dff..d49adf671b954 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. 
The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // AddLink adds a link. - // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. 
-type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. 
- // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. 
- Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 0000000000000..77952d2a0b310 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 20b5cf24332da..dc5e34cad0dd4 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index e57bf57fce82b..0000000000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! 
git diff --quiet; then \
-	git status
-	printf "\n\nError: working tree is not clean\n"
-	exit -1
-fi
-
-if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
-	printf "$(git log -1)"
-	printf "\n\nError: HEAD is not pointing to a tagged version"
-fi
-
-make ${TOOLS_DIR}/gojq
-
-DIR_TMP="${GOPATH}/src/oteltmp/"
-rm -rf $DIR_TMP
-mkdir -p $DIR_TMP
-
-printf "Copy examples to ${DIR_TMP}\n"
-cp -a ./example ${DIR_TMP}
-
-# Update go.mod files
-printf "Update go.mod: rename module and remove replace\n"
-
-PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
-
-for dir in $PACKAGE_DIRS; do
-	printf "  Update go.mod for $dir\n"
-	(cd "${DIR_TMP}/${dir}" && \
-	 # replaces is ("mod1" "mod2" …)
-	 replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
-	 # strip double quotes
-	 replaces=("${replaces[@]%\"}") && \
-	 replaces=("${replaces[@]#\"}") && \
-	 # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
-	 dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
-	 go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
-	 go mod tidy)
-done
-printf "Update done:\n\n"
-
-# Build directories that contain main package. These directories are different than
-# directories that contain go.mod files.
-printf "Build examples:\n"
-EXAMPLES=$(./get_main_pkgs.sh ./example)
-for ex in $EXAMPLES; do
-	printf "  Build $ex in ${DIR_TMP}/${ex}\n"
-	(cd "${DIR_TMP}/${ex}" && \
-	 go build .)
-done
-
-# Cleanup
-printf "Remove copied files.\n"
-rm -rf $DIR_TMP
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 0000000000000..c9b7cdbbfef7e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+	echo "Error: The released sections of the changelog file have been modified."
+	diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+	rm -rf "$TEMP_DIR"
+	false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
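For orientation, the relocated TracerProvider, Tracer, and Span interfaces above compose in instrumentation code roughly as follows. This is an illustrative sketch, not part of the diff: doWork is a hypothetical stand-in for the operation being traced, and the globally registered TracerProvider is assumed to have been configured elsewhere by an SDK.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
)

// doWork is a hypothetical stand-in for the traced operation.
func doWork(ctx context.Context) error { return nil }

func handle(ctx context.Context) error {
	// The tracer name should identify the instrumenting package.
	tracer := otel.GetTracerProvider().Tracer("example.com/demo")

	// Every Span returned by Start must be ended, per the Tracer.Start
	// contract documented in the new tracer.go.
	ctx, span := tracer.Start(ctx, "handle")
	defer span.End()

	span.SetAttributes(attribute.String("demo.key", "value"))

	// The new TraceState.Walk iterates list-members without allocating;
	// returning false from the callback stops iteration early.
	span.SpanContext().TraceState().Walk(func(key, value string) bool {
		return true
	})

	if err := doWork(ctx); err != nil {
		// RecordError only adds an exception event; the status must be
		// set separately, as documented on Span.RecordError above.
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		return err
	}
	return nil
}

Call sites are unaffected by this part of the update: the interfaces moved unchanged out of trace.go into the dedicated provider.go, span.go, and tracer.go files, and TraceState.Walk is purely additive.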
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ab28960524b81..59e24816137f5 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.28.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 241cfc82a8d0e..c04b12f6b74c3 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,19 +3,13 @@ module-sets: stable-v1: - version: v1.28.0 + version: v1.32.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -29,21 +23,20 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.50.0 + version: v0.54.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.4.0 + version: v0.8.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.8 + version: v0.0.11 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s new file mode 100644 index 0000000000000..ec2acfe540ea7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go new file mode 100644 index 0000000000000..b838cb9e956e6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +package cpu + +// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl +// call (see issue 43089). It also restricts AVX512 support for Darwin to +// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233). +// +// Background: +// Darwin implements a special mechanism to economize on thread state when +// AVX512 specific registers are not in use. This scheme minimizes state when +// preempting threads that haven't yet used any AVX512 instructions, but adds +// special requirements to check for AVX512 hardware support at runtime (e.g. +// via sysctl call or commpage inspection). See issue 43089 and link below for +// full background: +// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240 +// +// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0 +// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption +// of the AVX512 mask registers (K0-K7) upon signal return. For this reason +// AVX512 is considered unsafe to use on Darwin for kernel versions prior to +// 21.3.0, where a fix has been confirmed. See issue 49233 for full background. +func darwinSupportsAVX512() bool { + return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0) +} + +// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies +func darwinKernelVersionCheck(major, minor, patch int) bool { + var release [256]byte + err := darwinOSRelease(&release) + if err != nil { + return false + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return false + } + case b == 0: + break Loop + default: + return false + } + } + if c != 2 { + return false + } + return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index 910728fb163f3..32a44514e245f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -6,10 +6,10 @@ package cpu -// cpuid is implemented in cpu_x86.s for gc compiler +// cpuid is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. 
func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s similarity index 94% rename from vendor/golang.org/x/sys/cpu/cpu_x86.s rename to vendor/golang.org/x/sys/cpu/cpu_gc_x86.s index 7d7ba33efb855..ce208ce6d6a3a 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s @@ -18,7 +18,7 @@ TEXT ·cpuid(SB), NOSPLIT, $0-24 RET // func xgetbv() (eax, edx uint32) -TEXT ·xgetbv(SB),NOSPLIT,$0-8 +TEXT ·xgetbv(SB), NOSPLIT, $0-8 MOVL $0, CX XGETBV MOVL AX, eax+0(FP) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index 99c60fe9f9c65..170d21ddfda41 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -23,9 +23,3 @@ func xgetbv() (eax, edx uint32) { gccgoXgetbv(&a, &d) return a, d } - -// gccgo doesn't build on Darwin, per: -// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 -func darwinSupportsAVX512() bool { - return false -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 08f35ea17735f..f1caf0f78e24b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -110,7 +110,6 @@ func doinit() { ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) ARM64.HasDIT = isSet(hwCap, hwcap_DIT) - // HWCAP2 feature bits ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go new file mode 100644 index 0000000000000..a0fd7e2f75d37 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc)) + +package cpu + +func darwinSupportsAVX512() bool { + panic("only implemented for gc && amd64 && darwin") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index c29f5e4c5a6e4..600a6807861e7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -92,10 +92,8 @@ func archInit() { osSupportsAVX = isSet(1, eax) && isSet(2, eax) if runtime.GOOS == "darwin" { - // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. - // Since users can't rely on mask register contents, let's not advertise AVX-512 support. - // See issue 49233. - osSupportsAVX512 = false + // Darwin requires special AVX512 checks, see cpu_darwin_x86.go + osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() } else { // Check if OPMASK and ZMM registers have OS support. osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go new file mode 100644 index 0000000000000..4d0888b0c010f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go @@ -0,0 +1,98 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on Darwin without depending on x/sys/unix. 
+ +//go:build darwin && amd64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type _C_int int32 + +// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419 +func darwinOSRelease(release *[256]byte) error { + // from x/sys/unix/zerrors_openbsd_amd64.go + const ( + CTL_KERN = 0x1 + KERN_OSRELEASE = 0x2 + ) + + mib := []_C_int{CTL_KERN, KERN_OSRELEASE} + n := unsafe.Sizeof(*release) + + return sysctl(mib, &release[0], &n, nil, 0) +} + +type Errno = syscall.Errno + +var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes. + +// from x/sys/unix/zsyscall_darwin_amd64.go L791-807 +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + if _, _, err := syscall_syscall6( + libc_sysctl_trampoline_addr, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + ); err != 0 { + return err + } + + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680eab88a9..7ca4fa12aa673 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. 
+func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. 
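The PTP ioctl wrappers above pair with the PTP_* request numbers and Ptp* types generated further below. A minimal usage sketch of how they fit together, assuming a PTP hardware clock exposed at the hypothetical path /dev/ptp0, sufficient privileges, and a Linux build; not part of the vendored changes:

package example

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func readPHC() error {
	// Open the PTP clock character device (the path is an assumption).
	fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer unix.Close(fd)

	// Query the clock's capabilities via PTP_CLOCK_GETCAPS2.
	caps, err := unix.IoctlPtpClockGetcaps(fd)
	if err != nil {
		return err
	}
	fmt.Printf("max freq adj: %d ppb, programmable pins: %d\n", caps.Max_adj, caps.N_pins)

	// FdToClockID converts the descriptor into a dynamic POSIX clock ID,
	// so the hardware clock can be read with the ordinary
	// clock_gettime(2) wrapper.
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.FdToClockID(fd), &ts); err != nil {
		return err
	}
	fmt.Printf("PHC time: %d.%09d\n", ts.Sec, ts.Nsec)
	return nil
}

IoctlPtpSysOffsetPrecise and IoctlPtpSysOffsetExtended follow the same open-then-ioctl pattern when comparing the hardware clock against the system clock.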
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index ac54ecaba0a4b..6ab02b6c3122a 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -158,6 +158,16 @@ includes_Linux='
 #endif
 #define _GNU_SOURCE
 
+// See the description in unix/linux/types.go
+#if defined(__ARM_EABI__) || \
+    (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \
+    (defined(__powerpc__) && (!defined(__powerpc64__)))
+# ifdef _TIME_BITS
+# undef _TIME_BITS
+# endif
+# define _TIME_BITS 32
+#endif
+
 // is broken on powerpc64, as it fails to include definitions of
 // these structures. We just include them copied from .
 #if defined(__powerpc__)
@@ -256,6 +266,7 @@ struct ltchars {
 #include
 #include
 #include
+#include <linux/ptp_clock.h>
 #include
 #include
 #include
@@ -527,6 +538,7 @@ ccflags="$@"
	$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ ||
	$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
	$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
+	$2 ~ /^PTP_/ ||
	$2 ~ /^RAW_PAYLOAD_/ ||
	$2 ~ /^[US]F_/ ||
	$2 ~ /^TP_STATUS_/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index f08abd434ff47..230a94549a7a2 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sys	ClockAdjtime(clockid int32, buf *Timex) (state int, err error)
 //sys	ClockGetres(clockid int32, res *Timespec) (err error)
 //sys	ClockGettime(clockid int32, time *Timespec) (err error)
+//sys	ClockSettime(clockid int32, time *Timespec) (err error)
 //sys	ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
 //sys	Close(fd int) (err error)
 //sys	CloseRange(first uint, last uint, flags uint) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
index 312ae6ac1d21a..7bf5c04bb0ae0 100644
--- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
@@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) {
 	return mapper.Munmap(b)
 }
 
+func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
+	xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
+	return unsafe.Pointer(xaddr), err
+}
+
+func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
+	return mapper.munmap(uintptr(addr), length)
+}
+
 //sys   Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
 //sysnb	Getgid() (gid int)
 //sysnb	Getpid() (pid int)
@@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) {
 // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/
 func isSpecialPath(path []byte) (v bool) {
 	var special = [4][8]byte{
-		[8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
-		[8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
+		{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
+		{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
 
 	var i, j int
 	for i = 0; i < len(special); i++ {
@@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) {
 //sys
Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. 
+ if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index de3b462489c0b..ccba391c9fb09 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2625,6 +2625,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8aa6d77c0184e..0c00cb3f3af83 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -237,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index da428f4253398..dfb364554dd58 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -237,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index bf45bfec78a53..d46dcf78abc9a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + 
PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 71c67162b737e..3af3248a7f2e1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -240,6 +240,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9476628fa02b8..292bcf0283d17 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -238,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9e85f3cf0c05..782b7110fa19b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index a48b68a7647ef..84973fd9271fb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ea00e8522a159..6d9cbc3b274bf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 91c64687176a9..5f9fedbce028d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 8cbf38d639016..bb0026ee0c467 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -237,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index a2df7341917ec..46120db5c9a19 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -237,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 2479137923331..5c951634fbed3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -237,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index d265f146ee016..11a84d5af2083 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 3f2d6443964ff..f78c4617cac1f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -234,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 
0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 5d8b727a1c837..aeb777c34427d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -239,6 +239,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index af30da5578031..5cc1e8eb2f35e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 3a69e45496268..8daaf3faf4c78 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1752,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1796,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1825,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1857,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1925,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1977,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) 
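Reviewer note, not part of the diff: besides the per-arch PTP_* ioctl numbers, this stretch of the x/sys regeneration moves the NETKIT_* values out of the IFLA_* const block into their own group (re-added immediately below) and gives zsyscall_linux.go a ClockSettime wrapper symmetric to the existing ClockGettime. A minimal sketch of the new call, assuming CAP_SYS_TIME; everything outside the two unix.Clock* calls is illustrative scaffolding:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var ts unix.Timespec
	// Read the realtime clock with the pre-existing wrapper.
	if err := unix.ClockGettime(unix.CLOCK_REALTIME, &ts); err != nil {
		fmt.Println("clock_gettime:", err)
		return
	}
	ts.Sec++ // step the clock forward one second, purely for demonstration
	// New in this regeneration; fails with EPERM without CAP_SYS_TIME.
	if err := unix.ClockSettime(unix.CLOCK_REALTIME, &ts); err != nil {
		fmt.Println("clock_settime:", err)
	}
}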
+const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -4110,6 +4118,106 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Rsv [3]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af4684b0..2e5d5a44357a2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 5cee9a3143fd5..4510bfc3f5c66 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -725,20 +725,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -894,6 +886,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar 
*uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1685,13 +1682,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 7b97a154c9573..51311e205ff00 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2203,6 +2203,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. +const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. 
+type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. +type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. 
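Reviewer note, not part of the diff: types_windows.go now defines the netioapi structs (MibIfRow2, MibUnicastIpAddressRow, MibIpInterfaceRow) that back the iphlpapi bindings added in the next file. A sketch of a lookup keyed by interface index; the index value is hypothetical, and per the MSDN page cited above the API expects either InterfaceLuid or InterfaceIndex to be set before the call:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	var row windows.MibIfRow2
	row.InterfaceIndex = 1 // hypothetical index; InterfaceLuid works as well
	// MibIfEntryNormalWithoutStatistics skips the per-packet counters.
	if err := windows.GetIfEntry2Ex(windows.MibIfEntryNormalWithoutStatistics, &row); err != nil {
		log.Fatalf("GetIfEntry2Ex: %v", err)
	}
	fmt.Println(windows.UTF16ToString(row.Alias[:]), "MTU:", row.Mtu)
}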
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 4c2e1bdc01ed3..6f5252880ceed 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -181,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -1606,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1638,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + 
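Reviewer note, not part of the diff: the NotifyIpInterfaceChange/NotifyUnicastIpAddressChange wrappers above take the callback as a raw uintptr, so callers are expected to go through windows.NewCallback. A hedged sketch; the handler signature mirrors the documented MIB_IPINTERFACE_CHANGE_CALLBACK shape and, like the handler body, is an assumption rather than anything this diff prescribes:

package main

import (
	"fmt"
	"time"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	cb := windows.NewCallback(func(ctx unsafe.Pointer, row *windows.MibIpInterfaceRow, notificationType uint32) uintptr {
		// Invoked on a system worker thread; row can be nil when the
		// notification type is MibInitialNotification.
		fmt.Println("interface change, notification type:", notificationType)
		return 0
	})
	var h windows.Handle
	if err := windows.NotifyIpInterfaceChange(windows.AF_UNSPEC, cb, nil, false, &h); err != nil {
		fmt.Println("NotifyIpInterfaceChange:", err)
		return
	}
	defer windows.CancelMibChangeNotify2(h) // the matching unregister wrapper added above
	time.Sleep(30 * time.Second)            // keep the process alive for the demo
}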
func AddDllDirectory(path *uint16) (cookie uintptr, err error) { r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index fe19e8f97a711..aa69fb4d509ff 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -719,6 +719,8 @@ type PythonSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. + ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` } func (x *PythonSettings) Reset() { @@ -760,6 +762,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + // Settings for Node client libraries. type NodeSettings struct { state protoimpl.MessageState @@ -1114,6 +1123,60 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. + RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. 
// All default values below are from those used in the client library @@ -1142,7 +1205,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1155,7 +1218,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1460,132 +1523,143 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, - 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 
0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 
0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, - 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, - 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, - 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, - 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 
0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 
0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, - 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, - 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, - 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, - 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, - 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, - 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, - 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 
0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1601,34 +1675,35 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + 
(*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings @@ -1645,25 +1720,26 @@ var file_google_api_client_proto_depIdxs = []int32{ 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 
32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 28, // [28:32] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1816,7 +1892,19 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1835,7 +1923,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 16, + NumMessages: 17, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 0854d298e4133..d9bfa6e1e7c08 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. 
-If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly. is a great place to start. These issues are well-documented and usually can be resolved with a single pull request. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly. proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. + and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 6a8a07781ae34..5d4096d46a048 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,28 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. 
-- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index be6e108705c48..abab279379ba8 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b50ce94..d7b40b7cb66f1 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 
//
// This should be useful for callers who want to configure backoff with
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index f391744f7299b..3a2092f1056e3 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -30,6 +30,7 @@ import (
	"google.golang.org/grpc/channelz"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials"
+	estats "google.golang.org/grpc/experimental/stats"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/metadata"
@@ -72,8 +73,21 @@ func unregisterForTesting(name string) {
	delete(m, name)
 }
 
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+	return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+	scs.connectedAddress = addr
+}
+
 func init() {
	internal.BalancerUnregister = unregisterForTesting
+	internal.ConnectedAddress = connectedAddress
+	internal.SetConnectedAddress = setConnectedAddress
 }
 
 // Get returns the resolver builder registered with the given name.
@@ -116,7 +130,7 @@ type SubConn interface {
	// UpdateAddresses updates the addresses used in this SubConn.
	// gRPC checks if currently-connected address is still in the new list.
	// If it's in the list, the connection will be kept.
-	// If it's not in the list, the connection will gracefully closed, and
+	// If it's not in the list, the connection will gracefully close, and
	// a new connection will be created.
	//
	// This will trigger a state transition for the SubConn.
@@ -128,8 +142,11 @@ type SubConn interface {
	Connect()
	// GetOrBuildProducer returns a reference to the existing Producer for this
	// ProducerBuilder in this SubConn, or, if one does not currently exist,
-	// creates a new one and returns it. Returns a close function which must
-	// be called when the Producer is no longer needed.
+	// creates a new one and returns it. Returns a close function which may be
+	// called when the Producer is no longer needed. Otherwise the producer
+	// will automatically be closed upon connection loss or subchannel close.
+	// Should only be called on a SubConn in state Ready. Otherwise the
+	// producer will be unable to create streams.
	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
	// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
	// allowed to complete. No future calls should be made on the SubConn.
@@ -243,6 +260,10 @@ type BuildOptions struct {
	// same resolver.Target as passed to the resolver. See the documentation for
	// the resolver.Target type for details about what it contains.
	Target resolver.Target
+	// MetricsRecorder is the metrics recorder that balancers can use to record
+	// metrics. Balancer implementations that do not register and record
+	// metrics on the metrics registry can ignore this field.
+	MetricsRecorder estats.MetricsRecorder
 }
 
 // Builder creates a balancer.
@@ -410,6 +431,9 @@ type SubConnState struct {
	// ConnectionError is set if the ConnectivityState is TransientFailure,
	// describing the reason the SubConn failed. Otherwise, it is nil.
	ConnectionError error
+	// connectedAddress contains the connected address when ConnectivityState
+	// is Ready. Otherwise, it is indeterminate.
+ connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the @@ -431,8 +455,10 @@ type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Should also return a close function that will be called when all - // references to the Producer have been given up. + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. Build(grpcClientConnInterface any) (p Producer, close func()) } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8e6ae..d5ed172ae6958 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } // If resolver state contains no addresses, return an error so ClientConn - // will trigger re-resolve. Also records this as an resolver error, so when + // will trigger re-resolve. Also records this as a resolver error, so when // the overall state turns transient failure, the error message will have // the zero address information. if len(s.ResolverState.Addresses) == 0 { @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go new file mode 100644 index 0000000000000..c519789458449 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -0,0 +1,24 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the pickfirst package. +package internal + +import "math/rand" + +// RandShuffle pseudo-randomizes the order of addresses. 
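+//
+// Keeping this as a swappable package-level var (like the
+// ShuffleAddressListForTesting hook it replaces) lets tests substitute a
+// deterministic implementation; illustrative only, not part of this change:
+//
+//	internal.RandShuffle = func(n int, swap func(i, j int)) {} // no-op shuffle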
+var RandShuffle = rand.Shuffle diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 07527603f1d4e..e069346a7565d 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -26,18 +26,23 @@ import ( "math/rand" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + + _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { + if envconfig.NewPickFirstEnabled { + return + } balancer.Register(pickfirstBuilder{}) - internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } var logger = grpclog.Component("pick-first-lb") @@ -50,7 +55,7 @@ const ( type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +// Shuffler is an interface for shuffling an address list. type Shuffler interface { ShuffleAddressListForTesting(n int, swap func(i, j int)) } +// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n +// is the number of elements. swap swaps the elements with indexes i and j. func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { @@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each @@ -155,7 +163,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. 
addrs = state.ResolverState.Addresses diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go new file mode 100644 index 0000000000000..985b6edc7f4c3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -0,0 +1,625 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirstleaf contains the pick_first load balancing policy which +// will be the universal leaf policy after dualstack changes are implemented. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package pickfirstleaf + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + if envconfig.NewPickFirstEnabled { + // Register as the default pick_first balancer. + Name = "pick_first" + } + balancer.Register(pickfirstBuilder{}) +} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + // Name is the name of the pick_first_leaf balancer. + // It is changed to "pick_first" in init() if this balancer is to be + // registered as the default pickfirst. + Name = "pick_first_leaf" +) + +// TODO: change to pick-first when this becomes the default pick_first policy. +const logPrefix = "[pick-first-leaf-lb %p] " + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + addressList: addressList{}, + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + mu: sync.Mutex{}, + } + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (b pickfirstBuilder) Name() string { + return Name +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. 
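+//
+// An illustrative sketch of how it is used within this file (see
+// requestConnectionLocked below):
+//
+//	sd, err := b.newSCData(curAddr) // creates the SubConn with a StateListener
+//	if err == nil {
+//		b.subConns.Set(curAddr, sd) // tracked by address in an AddressMap
+//	}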
+type scData struct {
+	// The following fields are initialized at build time and read-only after
+	// that.
+	subConn balancer.SubConn
+	addr    resolver.Address
+
+	state   connectivity.State
+	lastErr error
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+	sd := &scData{
+		state: connectivity.Idle,
+		addr:  addr,
+	}
+	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+		StateListener: func(state balancer.SubConnState) {
+			b.updateSubConnState(sd, state)
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	sd.subConn = sc
+	return sd, nil
+}
+
+type pickfirstBalancer struct {
+	// The following fields are initialized at build time and read-only after
+	// that and therefore do not need to be guarded by a mutex.
+	logger *internalgrpclog.PrefixLogger
+	cc     balancer.ClientConn
+
+	// The mutex is used to ensure synchronization of updates triggered
+	// from the idle picker and the already serialized resolver,
+	// SubConn state updates.
+	mu    sync.Mutex
+	state connectivity.State
+	// scData for active subConns mapped by address.
+	subConns    *resolver.AddressMap
+	addressList addressList
+	firstPass   bool
+	numTF       int
+}
+
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determines the resolver update to be invalid.
+func (b *pickfirstBalancer) ResolverError(err error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
+	if b.logger.V(2) {
+		b.logger.Infof("Received error from the name resolver: %v", err)
+	}
+
+	// The picker will not change since the balancer does not currently
+	// report an error. If the balancer hasn't received a single good resolver
+	// update yet, transition to TRANSIENT_FAILURE.
+	if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+		if b.logger.V(2) {
+			b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+		}
+		return
+	}
+
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
+	})
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+		// Cleanup state pertaining to the previous resolver state.
+		// Treat an empty address list like an error by calling b.ResolverError.
+		b.state = connectivity.TransientFailure
+		b.closeSubConnsLocked()
+		b.addressList.updateAddrs(nil)
+		b.resolverErrorLocked(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	cfg, ok := state.BalancerConfig.(pfConfig)
+	if state.BalancerConfig != nil && !ok {
+		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
+	}
+
+	if b.logger.V(2) {
+		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+	}
+
+	var newAddrs []resolver.Address
+	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+		// Perform the optional shuffling described in gRFC A62. The shuffling
+		// will change the order of endpoints but not touch the order of the
+		// addresses within each endpoint. - A61
+		if cfg.ShuffleAddressList {
+			endpoints = append([]resolver.Endpoint{}, endpoints...)
+			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+		}
+
+		// "Flatten the list by concatenating the ordered list of addresses for
+		// each of the endpoints, in order." - A61
+		for _, endpoint := range endpoints {
+			// "In the flattened list, interleave addresses from the two address
+			// families, as per RFC-8305 section 4." - A61
+			// TODO: support the above language.
+			newAddrs = append(newAddrs, endpoint.Addresses...)
+		}
+	} else {
+		// Endpoints not set, process addresses until we migrate resolver
+		// emissions fully to Endpoints. The top channel does wrap emitted
+		// addresses with endpoints, however some balancers such as weighted
+		// target do not forward the corresponding correct endpoints down/split
+		// endpoints properly. Once all balancers correctly forward endpoints
+		// down, can delete this else conditional.
+		newAddrs = state.ResolverState.Addresses
+		if cfg.ShuffleAddressList {
+			newAddrs = append([]resolver.Address{}, newAddrs...)
+			internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+		}
+	}
+
+	// If an address appears in multiple endpoints or in the same endpoint
+	// multiple times, we keep it only once. We will create only one SubConn
+	// for the address because an AddressMap is used to store SubConns.
+	// Not de-duplicating would result in attempting to connect to the same
+	// SubConn multiple times in the same pass. We don't want this.
+	newAddrs = deDupAddresses(newAddrs)
+
+	// Since we have a new set of addresses, we are again at first pass.
+	b.firstPass = true
+
+	// If the previous ready SubConn exists in the new address list,
+	// keep this connection and don't create new SubConns.
+	prevAddr := b.addressList.currentAddress()
+	prevAddrsCount := b.addressList.size()
+	b.addressList.updateAddrs(newAddrs)
+	if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) {
+		return nil
+	}
+
+	b.reconcileSubConnsLocked(newAddrs)
+	// If it's the first resolver update or the balancer was already READY
+	// (but the new address list does not contain the ready SubConn) or
+	// CONNECTING, enter CONNECTING.
+	// We may be in TRANSIENT_FAILURE due to a previous empty address list;
+	// we should still enter CONNECTING because the sticky TF behaviour
+	// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+	// due to connectivity failures.
+	if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+		// Start connection attempt at first address.
+		b.state = connectivity.Connecting
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		b.requestConnectionLocked()
+	} else if b.state == connectivity.TransientFailure {
+		// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+		// we're READY. See A62.
+		b.requestConnectionLocked()
+	}
+	return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) Close() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.closeSubConnsLocked()
+	b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn, so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() {
+		b.firstPass = true
+		b.requestConnectionLocked()
+	}
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+	for _, sd := range b.subConns.Values() {
+		sd.(*scData).subConn.Shutdown()
+	}
+	b.subConns = resolver.NewAddressMap()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+	seenAddrs := resolver.NewAddressMap()
+	retAddrs := []resolver.Address{}
+
+	for _, addr := range addrs {
+		if _, ok := seenAddrs.Get(addr); ok {
+			continue
+		}
+		seenAddrs.Set(addr, true)
+		retAddrs = append(retAddrs, addr)
+	}
+	return retAddrs
+}
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+//   - closing subchannels: any existing subchannels associated with addresses
+//     that are no longer in the updated list are shut down.
+//   - removing subchannels: entries for these closed subchannels are removed
+//     from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+	newAddrsMap := resolver.NewAddressMap()
+	for _, addr := range newAddrs {
+		newAddrsMap.Set(addr, true)
+	}
+
+	for _, oldAddr := range b.subConns.Keys() {
+		if _, ok := newAddrsMap.Get(oldAddr); ok {
+			continue
+		}
+		val, _ := b.subConns.Get(oldAddr)
+		val.(*scData).subConn.Shutdown()
+		b.subConns.Delete(oldAddr)
+	}
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other subConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+	for _, v := range b.subConns.Values() {
+		sd := v.(*scData)
+		if sd.subConn != selected.subConn {
+			sd.subConn.Shutdown()
+		}
+	}
+	b.subConns = resolver.NewAddressMap()
+	b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+	if !b.addressList.isValid() {
+		return
+	}
+	var lastErr error
+	for valid := true; valid; valid = b.addressList.increment() {
+		curAddr := b.addressList.currentAddress()
+		sd, ok := b.subConns.Get(curAddr)
+		if !ok {
+			var err error
+			// We want to assign the new scData to sd from the outer scope,
+			// hence we can't use := below.
+			sd, err = b.newSCData(curAddr)
+			if err != nil {
+				// This should never happen, unless the clientConn is being shut
+				// down.
+				if b.logger.V(2) {
+					b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+				}
+				// Do nothing, the LB policy will be closed soon.
+				return
+			}
+			b.subConns.Set(curAddr, sd)
+		}
+
+		scd := sd.(*scData)
+		switch scd.state {
+		case connectivity.Idle:
+			scd.subConn.Connect()
+		case connectivity.TransientFailure:
+			// Try the next address.
+			lastErr = scd.lastErr
+			continue
+		case connectivity.Ready:
+			// Should never happen.
+			b.logger.Errorf("Requesting a connection even though we have a READY SubConn")
+		case connectivity.Shutdown:
+			// Should never happen.
+			b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map")
+		case connectivity.Connecting:
+			// Wait for the SubConn to report success or failure.
+		}
+		return
+	}
+	// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+	// first pass.
+	b.endFirstPassLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	oldState := sd.state
+	sd.state = newState.ConnectivityState
+	// Previously relevant SubConns can still call back with state updates.
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes this
+	// SubConn.
+	if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd {
+		return
+	}
+	if newState.ConnectivityState == connectivity.Shutdown {
+		return
+	}
+
+	if newState.ConnectivityState == connectivity.Ready {
+		b.shutdownRemainingLocked(sd)
+		if !b.addressList.seekTo(sd.addr) {
+			// This should not fail as we should have only one SubConn after
+			// entering READY. The SubConn should be present in the addressList.
+			b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses)
+			return
+		}
+		b.state = connectivity.Ready
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.Ready,
+			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+		})
+		return
+	}
+
+	// If the LB policy is READY, and it receives a subchannel state change,
+	// it means that the READY subchannel has failed.
+	// A SubConn can also transition from CONNECTING directly to IDLE when
+	// a transport is successfully created, but the connection fails
+	// before the SubConn can send the notification for READY. We treat
+	// this as a successful connection and transition to IDLE.
+	if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+		// Once a transport fails, the balancer enters IDLE and starts from
+		// the first address when the picker is used.
+		b.shutdownRemainingLocked(sd)
+		b.state = connectivity.Idle
+		b.addressList.reset()
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.Idle,
+			Picker:            &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+		})
+		return
+	}
+
+	if b.firstPass {
+		switch newState.ConnectivityState {
+		case connectivity.Connecting:
+			// The balancer can be in either IDLE, CONNECTING or
+			// TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in
+			// TRANSIENT_FAILURE until it's READY. See A62.
+			// If the balancer is already in CONNECTING, no update is needed.
+			if b.state == connectivity.Idle {
+				b.state = connectivity.Connecting
+				b.cc.UpdateState(balancer.State{
+					ConnectivityState: connectivity.Connecting,
+					Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+				})
+			}
+		case connectivity.TransientFailure:
+			sd.lastErr = newState.ConnectionError
+			// Since we're re-using common SubConns while handling resolver
+			// updates, we could receive an out-of-turn TRANSIENT_FAILURE from
+			// a pass over the previous address list. We ignore such updates.
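+			// For example, if addresses [A, B] are replaced by [B, A] and the
+			// balancer is currently attempting B, a late TRANSIENT_FAILURE
+			// from A's reused SubConn must not advance the address pointer;
+			// the guard below only lets a failure of the current address
+			// move the list forward.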
+
+			if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+				return
+			}
+			if b.addressList.increment() {
+				b.requestConnectionLocked()
+				return
+			}
+			// End of the first pass.
+			b.endFirstPassLocked(newState.ConnectionError)
+		}
+		return
+	}
+
+	// We have finished the first pass; keep re-connecting failing SubConns.
+	switch newState.ConnectivityState {
+	case connectivity.TransientFailure:
+		b.numTF = (b.numTF + 1) % b.subConns.Len()
+		sd.lastErr = newState.ConnectionError
+		if b.numTF%b.subConns.Len() == 0 {
+			b.cc.UpdateState(balancer.State{
+				ConnectivityState: connectivity.TransientFailure,
+				Picker:            &picker{err: newState.ConnectionError},
+			})
+		}
+		// We don't need to request re-resolution since the SubConn already
+		// does that before reporting TRANSIENT_FAILURE.
+		// TODO: #7534 - Move re-resolution requests from SubConn into
+		// pick_first.
+	case connectivity.Idle:
+		sd.subConn.Connect()
+	}
+}
+
+func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) {
+	b.firstPass = false
+	b.numTF = 0
+	b.state = connectivity.TransientFailure
+
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: lastErr},
+	})
+	// Start re-connecting all the SubConns that are already in IDLE.
+	for _, v := range b.subConns.Values() {
+		sd := v.(*scData)
+		if sd.state == connectivity.Idle {
+			sd.subConn.Connect()
+		}
+	}
+}
+
+type picker struct {
+	result balancer.PickResult
+	err    error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+	exitIdle func()
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	i.exitIdle()
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a one-dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+	addresses []resolver.Address
+	idx       int
+}
+
+func (al *addressList) isValid() bool {
+	return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+	return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+	if !al.isValid() {
+		return false
+	}
+	al.idx++
+	return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+	if !al.isValid() {
+		return resolver.Address{}
+	}
+	return al.addresses[al.idx]
+}
+
+// first returns the first address in the list. If the list is empty, it returns
+// an empty address instead.
+func (al *addressList) first() resolver.Address {
+	if len(al.addresses) == 0 {
+		return resolver.Address{}
+	}
+	return al.addresses[0]
+}
+
+func (al *addressList) reset() {
+	al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+	al.addresses = addrs
+	al.reset()
+}
+
+// seekTo returns false if the needle was not found and the current index was
+// left unchanged.
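+//
+// Illustrative use, taken from UpdateClientConnState above: after a resolver
+// update, a READY connection is kept only if its address is still present in
+// the new list:
+//
+//	if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) {
+//		return nil
++//	}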
+func (al *addressList) seekTo(needle resolver.Address) bool {
+	for ai, addr := range al.addresses {
+		if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+			continue
+		}
+		al.idx = ai
+		return true
+	}
+	return false
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes) &&
+		a.Metadata == b.Metadata
+}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 4161fdf47a8b1..2a4f2878aef45 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -24,13 +24,18 @@ import (
	"sync"
 
	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal"
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
 )
 
+var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+
 // ccBalancerWrapper sits between the ClientConn and the Balancer.
 //
 // ccBalancerWrapper implements methods corresponding to the ones on the
@@ -79,6 +84,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
			CustomUserAgent: cc.dopts.copts.UserAgent,
			ChannelzParent:  cc.channelz,
			Target:          cc.parsedTarget,
+			MetricsRecorder: cc.metricsRecorderList,
		},
		serializer:       grpcsync.NewCallbackSerializer(ctx),
		serializerCancel: cancel,
@@ -92,7 +98,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
 // it is safe to call into the balancer here.
 func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
	errCh := make(chan error)
-	ok := ccb.serializer.Schedule(func(ctx context.Context) {
+	uccs := func(ctx context.Context) {
		defer close(errCh)
		if ctx.Err() != nil || ccb.balancer == nil {
			return
@@ -107,17 +113,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
			logger.Infof("error from balancer.UpdateClientConnState: %v", err)
		}
		errCh <- err
-	})
-	if !ok {
-		return nil
	}
+	onFailure := func() { close(errCh) }
+
+	// UpdateClientConnState can race with Close, and when the latter wins, the
+	// serializer is closed, and the attempt to schedule the callback will fail.
+	// It is acceptable to ignore this failure. But since we want to handle the
+	// state update in a blocking fashion (when we successfully schedule the
+	// callback), we have to use the ScheduleOr method and not the TrySchedule
+	// method on the serializer.
+	ccb.serializer.ScheduleOr(uccs, onFailure)
	return <-errCh
 }
 
 // resolverError is invoked by grpc to push a resolver error to the underlying
 // balancer. The call to the balancer is executed from the serializer.
func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -133,7 +145,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.closed = true ccb.mu.Unlock() channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { + ccb.serializer.TrySchedule(func(context.Context) { if ccb.balancer == nil { return } @@ -145,7 +157,7 @@ func (ccb *ccBalancerWrapper) close() { // exitIdle invokes the balancer's exitIdle method in the serializer. func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -182,7 +194,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -246,21 +258,28 @@ type acBalancerWrapper struct { ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer + producersMu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer } // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } + // Invalidate all producers on any state change. + acbw.closeProducers() + // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. - acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) }) } @@ -277,6 +296,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { + acbw.closeProducers() acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } @@ -284,9 +304,10 @@ func (acbw *acBalancerWrapper) Shutdown() { // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready") + } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) 
} @@ -311,15 +332,15 @@ type refCountedProducer struct { } func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() // Look up existing producer from this builder. pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. @@ -329,13 +350,26 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( // and delete the refCountedProducer from the map if the total reference // count goes to zero. unref := func() { - acbw.mu.Lock() + acbw.producersMu.Lock() + // If closeProducers has already closed this producer instance, refs is + // set to 0, so the check after decrementing will never pass, and the + // producer will not be double-closed. pData.refs-- if pData.refs == 0 { defer pData.close() // Run outside the acbw mutex delete(acbw.producers, pb) } - acbw.mu.Unlock() + acbw.producersMu.Unlock() } return pData.producer, grpcsync.OnceFunc(unref) } + +func (acbw *acBalancerWrapper) closeProducers() { + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() + for pb, pData := range acbw.producers { + pData.refs = 0 + pData.close() + delete(acbw.producers, pb) + } +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 63c639e4fe933..55bffaa77ef0f 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GrpcLogEntry); i { case 0: return &v.state @@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientHeader); i { case 0: return &v.state @@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerHeader); i { case 0: return &v.state @@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Trailer); i { case 0: return &v.state @@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetadataEntry); i { case 0: return &v.state @@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { } } } - 
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 423be7b43b00c..19763f8eddfa9 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -39,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -590,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). - authority string // See initAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). - idlenessMgr *idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -626,11 +632,6 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. 
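+//
+// For example, to block until the channel leaves CONNECTING (an illustrative
+// sketch, not part of this change; assumes a ctx is in scope):
+//
+//	state := cc.GetState()
+//	for state == connectivity.Connecting && cc.WaitForStateChange(ctx, state) {
+//		state = cc.GetState()
+//	}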
func (cc *ClientConn) GetState() connectivity.State {
	return cc.csMgr.getState()
}
@@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
	cc.csMgr.updateState(connectivity.TransientFailure)
 }
 
-// Makes a copy of the input addresses slice and clears out the balancer
-// attributes field. Addresses are passed during subconn creation and address
-// update operations. In both cases, we will clear the balancer attributes by
-// calling this function, and therefore we will be able to use the Equal method
-// provided by the resolver.Address type for comparison.
-func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
	out := make([]resolver.Address, len(in))
-	for i := range in {
-		out[i] = in[i]
-		out[i].BalancerAttributes = nil
-	}
+	copy(out, in)
	return out
 }
 
@@ -837,12 +827,11 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
	ac := &addrConn{
		state:        connectivity.Idle,
		cc:           cc,
-		addrs:        copyAddressesWithoutBalancerAttributes(addrs),
+		addrs:        copyAddresses(addrs),
		scopts:       opts,
		dopts:        cc.dopts,
		channelz:     channelz.RegisterSubChannel(cc.channelz, ""),
		resetBackoff: make(chan struct{}),
-		stateChan:    make(chan struct{}),
	}
	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
	// Start with our address set to the first address; this may be updated if
@@ -918,28 +907,29 @@ func (ac *addrConn) connect() error {
		ac.mu.Unlock()
		return nil
	}
-	ac.mu.Unlock()
 
-	ac.resetTransport()
+	ac.resetTransportAndUnlock()
	return nil
 }
 
-func equalAddresses(a, b []resolver.Address) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	for i, v := range a {
-		if !v.Equal(b[i]) {
-			return false
-		}
-	}
-	return true
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes) &&
+		a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+	return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
 }
 
 // updateAddrs updates ac.addrs with the new addresses list and handles active
 // connections or connection attempts.
 func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
-	addrs = copyAddressesWithoutBalancerAttributes(addrs)
+	addrs = copyAddresses(addrs)
	limit := len(addrs)
	if limit > 5 {
		limit = 5
@@ -947,7 +937,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
	channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
 
	ac.mu.Lock()
-	if equalAddresses(ac.addrs, addrs) {
+	if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
		ac.mu.Unlock()
		return
	}
@@ -966,7 +956,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
	// Try to find the connected address.
	for _, a := range addrs {
		a.ServerName = ac.cc.getServerName(a)
-		if a.Equal(ac.curAddr) {
+		if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
			// We are connected to a valid address, so do nothing but
			// update the addresses.
			ac.mu.Unlock()
@@ -992,11 +982,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
		ac.updateConnectivityState(connectivity.Idle, nil)
	}
 
-	ac.mu.Unlock()
-
	// Since we were connecting/connected, we should start a new connection
	// attempt.
-	go ac.resetTransport()
+	go ac.resetTransportAndUnlock()
 }
 
 // getServerName determines the serverName to be used in the connection
@@ -1152,10 +1140,15 @@ func (cc *ClientConn) Close() error {
 
	<-cc.resolverWrapper.serializer.Done()
	<-cc.balancerWrapper.serializer.Done()
-
+	var wg sync.WaitGroup
	for ac := range conns {
-		ac.tearDown(ErrClientConnClosing)
+		wg.Add(1)
+		go func(ac *addrConn) {
+			defer wg.Done()
+			ac.tearDown(ErrClientConnClosing)
+		}(ac)
	}
+	wg.Wait()
	cc.addTraceEvent("deleted")
	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
	// trace reference to the entity being deleted, and thus prevent it from being
@@ -1190,8 +1183,7 @@ type addrConn struct {
	addrs   []resolver.Address // All addresses that the resolver resolved to.
 
	// Use updateConnectivityState for updating addrConn's connectivity state.
-	state     connectivity.State
-	stateChan chan struct{} // closed and recreated on every state change.
+	state connectivity.State
 
	backoffIdx   int // Needs to be stateful for resetConnectBackoff.
	resetBackoff chan struct{}
@@ -1204,9 +1196,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
	if ac.state == s {
		return
	}
-	// When changing states, reset the state change channel.
-	close(ac.stateChan)
-	ac.stateChan = make(chan struct{})
	ac.state = s
	ac.channelz.ChannelMetrics.State.Store(&s)
	if lastErr == nil {
@@ -1214,7 +1203,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
	} else {
		channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
	}
-	ac.acbw.updateState(s, lastErr)
+	ac.acbw.updateState(s, ac.curAddr, lastErr)
 }
 
 // adjustParams updates parameters used to create transports upon
@@ -1231,8 +1220,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
	}
 }
 
-func (ac *addrConn) resetTransport() {
-	ac.mu.Lock()
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
	acCtx := ac.ctx
	if acCtx.Err() != nil {
		ac.mu.Unlock()
@@ -1263,6 +1254,8 @@ func (ac *addrConn) resetTransport() {
	ac.mu.Unlock()
 
	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+		// TODO: #7534 - Move re-resolution requests into the pick_first LB policy
+		// to ensure one resolution request per pass instead of per subconn failure.
		ac.cc.resolveNow(resolver.ResolveNowOptions{})
		ac.mu.Lock()
		if acCtx.Err() != nil {
@@ -1304,7 +1297,7 @@ func (ac *addrConn) resetTransport() {
	ac.mu.Unlock()
 }
 
-// tryAllAddrs tries to creates a connection to the addresses, and stop when at
+// tryAllAddrs tries to create a connection to the addresses, stopping at
 // the first successful one. It returns an error if no address was successfully
 // connected, or updates ac appropriately with the new transport.
func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
@@ -1516,29 +1509,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
	return nil
 }
 
-// getTransport waits until the addrconn is ready and returns the transport.
-// If the context expires first, returns an appropriate status. If the
-// addrConn is stopped first, returns an Unavailable status error.
-func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
-	for ctx.Err() == nil {
-		ac.mu.Lock()
-		t, state, sc := ac.transport, ac.state, ac.stateChan
-		ac.mu.Unlock()
-		if state == connectivity.Ready {
-			return t, nil
-		}
-		if state == connectivity.Shutdown {
-			return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
-		}
-
-		select {
-		case <-ctx.Done():
-		case <-sc:
-		}
-	}
-	return nil, status.FromContextError(ctx.Err()).Err()
-}
-
 // tearDown starts to tear down the addrConn.
 //
 // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
@@ -1585,7 +1555,7 @@ func (ac *addrConn) tearDown(err error) {
	} else {
		// Hard close the transport when the channel is entering idle or is
		// being shutdown. In the case where the channel is being shutdown,
-		// closing of transports is also taken care of by cancelation of cc.ctx.
+		// closing of transports is also taken care of by cancellation of cc.ctx.
		// But in the case where the channel is entering idle, we need to
		// explicitly close the transports here. Instead of distinguishing
		// between these two cases, it is simpler to close the transport
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 411e3dfd47ccd..e840858b77b18 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -21,18 +21,73 @@ package grpc
 
 import (
	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+	"google.golang.org/grpc/mem"
 )
 
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the registry in
+// the encoding package and is not part of the core functionality.
 type baseCodec interface {
-	Marshal(v any) ([]byte, error)
-	Unmarshal(data []byte, v any) error
+	Marshal(v any) (mem.BufferSlice, error)
+	Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and, if
+// a codec is registered there, wraps it with newCodecV1Bridge to turn it into
+// an encoding.CodecV2. Otherwise, it returns the codec from the V2 registry
+// with encoding.GetCodecV2, or nil if neither registry has one.
+func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443bfee..4c805c64462c9 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 4114358545efa..e163a473df93e 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{ // NewTLS uses c to construct a TransportCredentials based on TLS. 
func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + config := applyDefaults(c) + if config.GetConfigForClient != nil { + oldFn := config.GetConfigForClient + config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + cfgForClient, err := oldFn(hello) + if err != nil || cfgForClient == nil { + return cfgForClient, err + } + return applyDefaults(cfgForClient), nil + } + } + return &tlsCreds{config: config} +} + +func applyDefaults(c *tls.Config) *tls.Config { + config := credinternal.CloneTLSConfig(c) + config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos) // If the user did not configure a MinVersion and did not configure a // MaxVersion < 1.2, use MinVersion=1.2, which is required by // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 + if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) { + config.MinVersion = tls.VersionTLS12 } // If the user did not configure CipherSuites, use all "secure" cipher // suites reported by the TLS package, but remove some explicitly forbidden // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { + if config.CipherSuites == nil { for _, cs := range tls.CipherSuites() { if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + config.CipherSuites = append(config.CipherSuites, cs.ID) } } } - return tc + return config } // NewClientTLSFromCert constructs TLS credentials from the provided root diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index f5453d48a53f3..518692c3afb87 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) @@ -60,7 +61,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool + internal.WithBufferPool = withBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -92,7 +93,6 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration - recvBufferPool SharedBufferPool defaultScheme string maxCallAttempts int } @@ -436,7 +436,7 @@ func WithTimeout(d time.Duration) DialOption { // option to true from the Control field. For a concrete example of how to do // this, see internal.NetDialerWithTCPKeepalive(). // -// For more information, please see [issue 23459] in the Go github repo. +// For more information, please see [issue 23459] in the Go GitHub repo. 
// // [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { @@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -677,11 +679,11 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, } @@ -758,25 +760,8 @@ func WithMaxCallAttempts(n int) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) -} - -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 0022859ad7465..e7b532b6f806f 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d7147f2..11d0ae142c429 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 0000000000000..074c5e234a7b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66d5cdf03ec58..ceec319dd2fb4 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ import ( "fmt" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/protoadapt" ) @@ -32,28 +33,51 @@ import ( const Name = "proto" func init() { - encoding.RegisterCodec(codec{}) + encoding.RegisterCodecV2(&codecV2{}) } -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} +// codecV2 is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. +type codecV2 struct{} -func (codec) Marshal(v any) ([]byte, error) { +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { vv := messageV2Of(v) if vv == nil { - return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } - return proto.Marshal(vv) + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) + } + + return data, nil } -func (codec) Unmarshal(data []byte, v any) error { +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { vv := messageV2Of(v) if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - return proto.Unmarshal(data, vv) + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) } func messageV2Of(v any) proto.Message { @@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message { return nil } -func (codec) Name() string { +func (c *codecV2) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 0000000000000..1d827dd5d9d41 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package stats + +import ( + "maps" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +func init() { + internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting +} + +var logger = grpclog.Component("metrics-registry") + +// DefaultMetrics are the default metrics registered through global metrics +// registry. This is written to at initialization time only, and is read only +// after initialization. +var DefaultMetrics = NewMetrics() + +// MetricDescriptor is the data for a registered metric. +type MetricDescriptor struct { + // The name of this metric. This name must be unique across the whole binary + // (including any per call metrics). See + // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions + // for metric naming conventions. + Name Metric + // The description of this metric. + Description string + // The unit (e.g. entries, seconds) of this metric. + Unit string + // The required label keys for this metric. These are intended to be + // attached to metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to be attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. + Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for an int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 count value on the metrics recorder provided. +func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Count(h, incr, labels...) +} + +// Int64HistoHandle is a typed handle for an int histogram metric. This handle +// is passed at the recording point in order to know which metric to record on. +type Int64HistoHandle MetricDescriptor + +// Descriptor returns the int64 histo handle typecast to a pointer to a +// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Histo(h, incr, labels...) +} + +// Float64HistoHandle is a typed handle for a float histogram metric. This +// handle is passed at the recording point in order to know which metric to +// record on. +type Float64HistoHandle MetricDescriptor + +// Descriptor returns the float64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 histo value on the metrics recorder provided. +func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Histo(h, incr, labels...) +} + +// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Int64GaugeHandle MetricDescriptor + +// Descriptor returns the int64 gauge handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 gauge value on the metrics recorder provided. +func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Gauge(h, incr, labels...) +} + +// registeredMetrics are the registered metric descriptor names. +var registeredMetrics = make(map[Metric]bool) + +// metricsRegistry contains all of the registered metrics. +// +// This is written to only at init time, and read only after that. +var metricsRegistry = make(map[Metric]*MetricDescriptor) + +// DescriptorForMetric returns the MetricDescriptor from the global registry. +// +// Returns nil if the MetricDescriptor is not present. +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] +} + +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) + } + registeredMetrics[name] = true + if def { + DefaultMetrics = DefaultMetrics.Add(name) + } +} + +// RegisterInt64Count registers the metric description onto the global registry. +// It returns a typed handle to use when recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64CountHandle)(descPtr) +} + +// RegisterFloat64Count registers the metric description onto the global +// registry. It returns a typed handle to use when recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic.
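Before the remaining Register* variants below, a hedged usage sketch from a consuming package (the package, metric name, and helper are hypothetical): registration happens at init time via a package-level handle, and recording goes through that handle.

package lbmetrics // hypothetical consuming component

import estats "google.golang.org/grpc/experimental/stats"

// Registered at init time, as the NOTEs in this file require.
var connAttempts = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "grpc.example.connection_attempts", // hypothetical metric name
	Description: "Number of connection attempts made.",
	Unit:        "attempt",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// recordAttempt shows the recording side: the handle routes the increment to
// the recorder's RecordInt64Count with the required label values.
func recordAttempt(recorder estats.MetricsRecorder, target string) {
	connAttempts.Record(recorder, 1, target)
}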
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64CountHandle)(descPtr) +} + +// RegisterInt64Histo registers the metric description onto the global registry. +// It returns a typed handle to use when recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64HistoHandle)(descPtr) +} + +// RegisterFloat64Histo registers the metric description onto the global +// registry. It returns a typed handle to use when recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64HistoHandle)(descPtr) +} + +// RegisterInt64Gauge registers the metric description onto the global registry. +// It returns a typed handle to use when recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntGauge + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64GaugeHandle)(descPtr) +} + +// snapshotMetricsRegistryForTesting snapshots the global data of the metrics +// registry. Returns a cleanup function that sets the metrics registry to its +// original state. +func snapshotMetricsRegistryForTesting() func() { + oldDefaultMetrics := DefaultMetrics + oldRegisteredMetrics := registeredMetrics + oldMetricsRegistry := metricsRegistry + + registeredMetrics = make(map[Metric]bool) + metricsRegistry = make(map[Metric]*MetricDescriptor) + maps.Copy(registeredMetrics, oldRegisteredMetrics) + maps.Copy(metricsRegistry, oldMetricsRegistry) + + return func() { + DefaultMetrics = oldDefaultMetrics + registeredMetrics = oldRegisteredMetrics + metricsRegistry = oldMetricsRegistry + } +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go new file mode 100644 index 0000000000000..3221f7a633a37 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats contains experimental metrics/stats APIs. +package stats + +import "maps" + +// MetricsRecorder records on metrics derived from the metric registry. +type MetricsRecorder interface { + // RecordInt64Count records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string) + // RecordFloat64Count records the measurement alongside labels on the float + // count associated with the provided handle. + RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string) + // RecordInt64Histo records the measurement alongside labels on the int + // histo associated with the provided handle. + RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string) + // RecordFloat64Histo records the measurement alongside labels on the float + // histo associated with the provided handle. + RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string) + // RecordInt64Gauge records the measurement alongside labels on the int + // gauge associated with the provided handle. + RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) +} + +// Metric is an identifier for a metric. +type Metric string + +// Metrics is a set of metrics to record. Once created, Metrics is immutable; +// however, Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetrics instead. +type Metrics struct { + // metrics are the set of metrics to initialize. + metrics map[Metric]bool } + +// NewMetrics returns a Metrics containing the provided metrics. +func NewMetrics(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *Metrics) Metrics() map[Metric]bool { + return m.metrics +} + +// Add adds the metrics to the metrics set and returns a new copy with the +// additional metrics. +func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed.
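Ahead of the Remove implementation below, a short sketch of how these immutable set operations compose (metric names illustrative); each call copies rather than mutating its receiver:

func exampleMetricSets() {
	defaults := NewMetrics("grpc.client.attempt.duration")
	expanded := defaults.Add("grpc.example.connection_attempts") // copy with one more metric
	merged := expanded.Join(NewMetrics("grpc.example.picks"))    // union; both inputs unchanged
	_ = merged.Metrics()                                         // read-only view; must not be mutated
}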
+func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index ac73c9ced2553..f1ae080dcb816 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -20,8 +20,6 @@ package grpclog import ( "fmt" - - "google.golang.org/grpc/internal/grpclog" ) // componentData records the settings for a component. @@ -33,22 +31,22 @@ var cache = map[string]*componentData{} func (c *componentData) InfoDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.InfoDepth(depth+1, args...) + InfoDepth(depth+1, args...) } func (c *componentData) WarningDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.WarningDepth(depth+1, args...) + WarningDepth(depth+1, args...) } func (c *componentData) ErrorDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.ErrorDepth(depth+1, args...) + ErrorDepth(depth+1, args...) } func (c *componentData) FatalDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.FatalDepth(depth+1, args...) + FatalDepth(depth+1, args...) } func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9cb993c..db320105e64e2 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,18 +18,15 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. 
Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
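ErrorDepth and FatalDepth below follow the same shape. For callers, usage is unchanged; a minimal sketch, assuming the default logger (which does not implement DepthLoggerV2, so the call falls back to the plain ...ln method):

package main

import "google.golang.org/grpc/grpclog"

func main() {
	// With a depth-aware logger the line is attributed one frame up the
	// stack; with the default logger it falls back to Infoln.
	grpclog.InfoDepth(1, "connection established")
}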
+func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 0000000000000..59c03bc14c2a9 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. +package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. +var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. +var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 0000000000000..e524fdd40b236 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// LoggerWrapper wraps Logger into a LoggerV2. +type LoggerWrapper struct { + Logger +} + +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Info(args ...any) { + l.Logger.Print(args...) +} + +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Infoln(args ...any) { + l.Logger.Println(args...) +} + +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. 
+func (l *LoggerWrapper) Infof(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Warning(args ...any) { + l.Logger.Print(args...) +} + +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Warningln(args ...any) { + l.Logger.Println(args...) +} + +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Warningf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Error(args ...any) { + l.Logger.Print(args...) +} + +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Errorln(args ...any) { + l.Logger.Println(args...) +} + +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Errorf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (*LoggerWrapper) V(int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go similarity index 52% rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index bfc45102ab245..07df71e98a87a 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,59 +16,17 @@ * */ -// Package grpclog (internal) defines depth logging for grpc. -package grpclog +package internal import ( + "encoding/json" + "fmt" + "io" + "log" "os" ) -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. -var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - // LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. 
Info(args ...any) @@ -107,14 +65,13 @@ type LoggerV2 interface { // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -124,3 +81,124 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. 
+ FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d8267ca4..4b203585707af 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d713032..892dc13d164b9 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. 
- Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. 
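The public constructors keep their behavior by forwarding to the internal package. For example, a sketch that is roughly equivalent to exporting GRPC_GO_LOG_VERBOSITY_LEVEL=2 before the default logger is built:

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 2))
}

func main() {
	if grpclog.V(2) { // gate verbose logs on the configured level
		grpclog.Infoln("verbose transport event")
	}
}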
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. - b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 38b8835073502..d92335445f650 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ +var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse @@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckRequest); i { case 0: return &v.state @@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() { return nil } } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckResponse); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 51b736ba06e5f..f96b8ab4927e1 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -73,7 +73,7 @@ type HealthClient interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. 
- Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) } type healthClient struct { @@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &healthWatchClient{ClientStream: stream} + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts . return x, nil } -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer -// for forward compatibility +// for forward compatibility. // // Health is gRPC's mechanism for checking whether a server is able to handle // RPCs. Its semantics are documented in @@ -160,19 +146,23 @@ type HealthServer interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error } -// UnimplementedHealthServer should be embedded to have forward compatible implementations. -type UnimplementedHealthServer struct { -} +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } +func (UnimplementedHealthServer) testEmbeddedByValue() {} // UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. 
// Use of this interface is not recommended, as added methods to HealthServer will @@ -182,6 +172,13 @@ type UnsafeHealthServer interface { } func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Health_ServiceDesc, srv) } @@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) } -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] // Health_ServiceDesc is the grpc.ServiceDesc for Health service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index cce6312d77f9c..d4b4b7081590e 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -51,7 +51,7 @@ func NewServer() *Server { } // Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { +func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { s.mu.RLock() defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 13821a9266065..85540f86a7382 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -33,6 +33,8 @@ type lbConfig struct { childConfig serviceconfig.LoadBalancingConfig } +// ChildName returns the name of the child balancer of the gracefulswitch +// Balancer. func ChildName(l serviceconfig.LoadBalancingConfig) string { return l.(*lbConfig).childBuilder.Name() } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a871dfb..9669328914ad4 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. 
-func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go index d7e9e1d54ecbc..3ec662799a83a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -43,6 +43,8 @@ type Channel struct { // Non-zero traceRefCount means the trace of this channel cannot be deleted. traceRefCount int32 + // ChannelMetrics holds connectivity state, target and call metrics for the + // channel within channelz. ChannelMetrics ChannelMetrics } @@ -50,6 +52,8 @@ type Channel struct { // nesting. func (c *Channel) channelzIdentifier() {} +// String returns a string representation of the Channel, including its parent +// entity and ID. func (c *Channel) String() string { if c.Parent == nil { return fmt.Sprintf("Channel #%d", c.ID) @@ -61,24 +65,31 @@ func (c *Channel) id() int64 { return c.ID } +// SubChans returns a copy of the map of sub-channels associated with the +// Channel. func (c *Channel) SubChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.subChans) } +// NestedChans returns a copy of the map of nested channels associated with the +// Channel. func (c *Channel) NestedChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.nestedChans) } +// Trace returns a copy of the Channel's trace data. func (c *Channel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() return c.trace.copy() } +// ChannelMetrics holds connectivity state, target and call metrics for the +// channel within channelz. type ChannelMetrics struct { // The current connectivity state of the channel. State atomic.Pointer[connectivity.State] @@ -136,12 +147,16 @@ func strFromPointer(s *string) string { return *s } +// String returns a string representation of the ChannelMetrics, including its +// state, target, and call metrics. func (c *ChannelMetrics) String() string { return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), ) } +// NewChannelMetricForTesting creates a new instance of ChannelMetrics with +// specified initial values for testing purposes. func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { c := &ChannelMetrics{} c.State.Store(&state) diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b08925d9..64c791953d017 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. 
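The grpclog hunks above reduce the exported LoggerV2 surface to thin aliases over internal/grpclog while leaving the constructor signatures untouched. A minimal sketch of how a caller installs a verbosity-2 logger through that unchanged public API; the writer choices here are illustrative, not mandated by the diff:

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Info and warning output goes to stdout; error and fatal output is
	// mirrored to stderr as well, per the NewLoggerV2 writer semantics.
	logger := grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stdout, os.Stderr, 2)

	// SetLoggerV2 is not mutex-protected, so install the logger before any
	// other gRPC activity.
	grpclog.SetLoggerV2(logger)

	if logger.V(2) {
		logger.Info("verbose logging enabled")
	}
}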
@@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e1507aa6..078bb81238bc4 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. EntriesPerPage = 50 curState int32 diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go index cdfc49d6eacc7..b5a82499299d5 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/server.go +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se return sm } +// CopyFrom copies the metrics data from the provided ServerMetrics +// instance into the current instance. func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { sm.CallsStarted.Store(o.CallsStarted.Load()) sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go index fa64834b25d03..90103847c5f3a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/socket.go +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct { RemoteFlowControlWindow int64 } +// SocketType represents the type of socket. type SocketType string +// SocketType can be one of these. const ( SocketTypeNormal = "NormalSocket" SocketTypeListen = "ListenSocket" ) +// Socket represents a socket within channelz which includes socket +// metrics and data related to socket activity and provides methods +// for managing and interacting with sockets. type Socket struct { Entity SocketType SocketType @@ -100,6 +105,8 @@ type Socket struct { Security credentials.ChannelzSecurityValue } +// String returns a string representation of the Socket, including its parent +// entity, socket type, and ID. func (ls *Socket) String() string { return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go index 3b88e4cba8e10..b20802e6e960d 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 { return sc.ID } +// Sockets returns a copy of the sockets map associated with the SubChannel. func (sc *SubChannel) Sockets() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(sc.sockets) } +// Trace returns a copy of the ChannelTrace associated with the SubChannel. 
 func (sc *SubChannel) Trace() *ChannelTrace {
 	db.mu.RLock()
 	defer db.mu.RUnlock()
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
index d1ed8df6a5186..0e6e18e185c7a 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -35,13 +35,13 @@ type SocketOptionData struct {
 // Getsockopt defines the function to get socket options requested by channelz.
 // It is to be passed to syscall.RawConn.Control().
 // Windows OS doesn't support Socket Option
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
+func (s *SocketOptionData) Getsockopt(uintptr) {
 	once.Do(func() {
 		logger.Warning("Channelz: socket options are not supported on non-linux environments")
 	})
 }
 
 // GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c any) *SocketOptionData {
+func GetSocketOption(any) *SocketOptionData {
 	return nil
 }
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
index 36b8674032300..2bffe4777684d 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/trace.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -79,13 +79,21 @@ type TraceEvent struct {
 	Parent *TraceEvent
 }
 
+// ChannelTrace provides tracing information for a channel.
+// It tracks various events and metadata related to the channel's lifecycle
+// and operations.
 type ChannelTrace struct {
-	cm          *channelMap
-	clearCalled bool
+	cm          *channelMap
+	clearCalled bool
+	// The time when the trace was created.
 	CreationTime time.Time
-	EventNum     int64
-	mu           sync.Mutex
-	Events       []*traceEvent
+	// A counter for the number of events recorded in the
+	// trace.
+	EventNum int64
+	mu       sync.Mutex
+	// A slice of traceEvent pointers representing the events recorded for
+	// this channel.
+	Events []*traceEvent
 }
 
 func (c *ChannelTrace) copy() *ChannelTrace {
@@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{
 	RefNormalSocket: "NormalSocket",
 }
 
+// String returns a string representation of the RefChannelType.
 func (r RefChannelType) String() string {
 	return refChannelTypeToString[r]
 }
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index d906487139445..6e7dd6b772709 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -45,7 +45,16 @@ var (
 	// option is present for backward compatibility. This option may be overridden
 	// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
 	// or "false".
-	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false)
+	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+	// XDSFallbackSupport is the env variable that controls whether support for
+	// xDS fallback is turned on. If this is unset or is false, only the first
+	// xDS server in the list of server configs will be used.
+	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
+	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
+	// instead of the existing pickfirst implementation. This can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
+	// to "true".
+ NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7f7044e1731c8..7617be2158957 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial + // WithBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. - WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // RecvBufferPool is implemented by the grpc package and returns a server + // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de7632b..092ad187a2c8d 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. 
- format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) } // V reports whether verbosity level l is at least the requested verbose level. func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) + if pl != nil { + return pl.logger.V(l) + } + return true } // NewPrefixLogger creates a prefix logger with the given prefix. -func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { return &PrefixLogger{logger: logger, prefix: prefix} } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index f7f40a16acee5..8e8e861280a07 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// Schedule adds a callback to be scheduled after existing callbacks are run. +// TrySchedule tries to schedule the provided callback function f to be +// executed in the order it was added. This is a best-effort operation. If the +// context passed to NewCallbackSerializer was canceled before this method is +// called, the callback will not be scheduled. // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) { + cs.callbacks.Put(f) +} + +// ScheduleOr schedules the provided callback function f to be executed in the +// order it was added. If the context passed to NewCallbackSerializer has been +// canceled before this method is called, the onFailure callback will be +// executed inline instead. // -// Return value indicates if the callback was successfully added to the list of -// callbacks to be executed by the serializer. It is not possible to add -// callbacks once the context passed to NewCallbackSerializer is cancelled. -func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - return cs.callbacks.Put(f) == nil +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. 
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { + if cs.callbacks.Put(f) != nil { + onFailure() + } } func (cs *CallbackSerializer) run(ctx context.Context) { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index aef8cec1ab0cd..6d8c2f518dff7 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { if ps.msg != nil { msg := ps.msg - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[sub] { @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) { ps.msg = msg for sub := range ps.subscribers { s := sub - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[s] { diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index ec62b4775e5b4..683d1955c6a11 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) { } // baseContentType is the base content-type for gRPC. This is a valid -// content-type on it's own, but can also include a content-subtype such as +// content-type on its own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index fe49cb74c55a3..2c13ee9dac753 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool { return true } +// EnterIdleModeForTesting instructs the channel to enter idle mode. func (m *Manager) EnterIdleModeForTesting() { m.tryEnterIdleMode() } @@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error { // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. + // here. The first one to get the lock would get the channel to exit idle. // - Channel is not in idle mode, and the user calls Connect which calls // m.ExitIdleMode. // @@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } +// Close stops the timer associated with the Manager, if it exists. func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 5d66539869232..20b4dc3d35369 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -183,7 +183,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. 
- GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -191,6 +191,8 @@ var ( // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + // ChannelzTurnOffForTesting disables the Channelz service for testing + // purposes. ChannelzTurnOffForTesting func() // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to @@ -203,11 +205,27 @@ var ( // UserSetDefaultScheme is set to true if the user has overridden the // default resolver scheme. - UserSetDefaultScheme bool = false + UserSetDefaultScheme = false - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) // HealthChecker defines the signature of the client-side LB channel health @@ -215,7 +233,7 @@ var ( // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). +// its health back by calling setConnectivityState(). // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 4552db16b0280..8691698ef2232 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -177,7 +177,7 @@ type dnsResolver struct { // finished. Otherwise, data race will be possible. [Race Example] in // dns_resolver_test we replace the real lookup functions with mocked ones to // facilitate testing. If Close() doesn't wait for watcher() goroutine - // finishes, race detector sometimes will warns lookup (READ the lookup + // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). 
 	wg sync.WaitGroup
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index afac56572ad55..b901c7bace506 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -55,7 +55,7 @@ func (r *passthroughResolver) start() {
 	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
 }
 
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
 
 func (*passthroughResolver) Close() {}
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 0000000000000..fd33af51ae896
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+	// TelemetryLabels are the telemetry labels to record.
+	TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there is none.
+func GetLabels(ctx context.Context) *Labels {
+	labels, _ := ctx.Value(labelsKey{}).(*Labels)
+	return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+	// could also append
+	return context.WithValue(ctx, labelsKey{}, labels)
+}
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 0000000000000..79044657be150
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+	"fmt"
+
+	estats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It eats any record calls where the label values provided do not match the
+// number of label keys.
+type MetricsRecorderList struct { + // metricsRecorders are the metrics recorders this list will forward to. + metricsRecorders []estats.MetricsRecorder +} + +// NewMetricsRecorderList creates a new metric recorder list with all the stats +// handlers provided which implement the MetricsRecorder interface. +// If no stats handlers provided implement the MetricsRecorder interface, +// the MetricsRecorder list returned is a no-op. +func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList { + var mrs []estats.MetricsRecorder + for _, sh := range shs { + if mr, ok := sh.(estats.MetricsRecorder); ok { + mrs = append(mrs, mr) + } + } + return &MetricsRecorderList{ + metricsRecorders: mrs, + } +} + +func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { + if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want { + panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want)) + } +} + +// RecordInt64Count records the measurement alongside labels on the int +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Count(handle, incr, labels...) + } +} + +// RecordFloat64Count records the measurement alongside labels on the float +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Count(handle, incr, labels...) + } +} + +// RecordInt64Histo records the measurement alongside labels on the int +// histo associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Histo(handle, incr, labels...) + } +} + +// RecordFloat64Histo records the measurement alongside labels on the float +// histo associated with the provided handle. +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +// RecordInt64Gauge records the measurement alongside labels on the int +// gauge associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc82059525..1186f1e9a9ad9 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,17 +138,19 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. 
 	p := s.Proto()
 	for _, detail := range details {
-		any, err := anypb.New(protoadapt.MessageV2Of(detail))
+		m, err := anypb.New(protoadapt.MessageV2Of(detail))
 		if err != nil {
 			return nil, err
 		}
-		p.Details = append(p.Details, any)
+		p.Details = append(p.Details, m)
 	}
 	return &Status{s: p}, nil
 }
 
 // Details returns a slice of details messages attached to the status.
 // If a detail cannot be decoded, the error is returned in place of the detail.
+// If the detail can be decoded, the proto message returned is of the same
+// type that was given to WithDetails().
 func (s *Status) Details() []any {
 	if s == nil || s.s == nil {
 		return nil
@@ -160,7 +162,38 @@ func (s *Status) Details() []any {
 			details = append(details, err)
 			continue
 		}
-		details = append(details, detail)
+		// The call to MessageV1Of is required to unwrap the proto message if
+		// it implemented only the MessageV1 API. The proto message would have
+		// been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
+		// added to a global registry used by any.UnmarshalNew().
+		// MessageV1Of has the following behaviour:
+		// 1. If the given message is a wrapped MessageV1, it returns the
+		//    unwrapped value.
+		// 2. If the given message already implements MessageV1, it returns it
+		//    as is.
+		// 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
+		//
+		// Since the Status.WithDetails() API only accepts MessageV1, calling
+		// MessageV1Of ensures we return the same type that was given to
+		// WithDetails:
+		// * If the given type implemented only MessageV1, the unwrapping from
+		//   point 1 above will restore the type.
+		// * If the given type implemented both MessageV1 and MessageV2, point 2
+		//   above will ensure no wrapping is performed.
+		// * If the given type implemented only MessageV2 and was wrapped using
+		//   MessageV1Of before passing to WithDetails(), it would be unwrapped
+		//   in WithDetails by calling MessageV2Of(). Point 3 above will ensure
+		//   that the type is wrapped in a MessageV1 wrapper again before
+		//   returning. Note that protoc-gen-go doesn't generate code which
+		//   implements ONLY MessageV2 at the time of writing.
+		//
+		// NOTE: Status details can also be added using the FromProto method.
+		// This could theoretically allow passing a Detail message that only
+		// implements the V2 API. In such a case the message will be wrapped in
+		// a MessageV1 wrapper when fetched using Details().
+		// Since protoc-gen-go generates only code that implements both V1 and
+		// V2 APIs for backward compatibility, this is not a concern.
+		details = append(details, protoadapt.MessageV1Of(detail))
 	}
 	return details
 }
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index 999f52cd75bdb..54c24c2ff3865 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -58,20 +58,20 @@ func GetRusage() *Rusage {
 
 // CPUTimeDiff returns the differences of user CPU time and system CPU time used
 // between two Rusage structs. It is a no-op function for non-linux environments.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
 	log()
 	return 0, 0
 }
 
 // SetTCPUserTimeout is a no-op function under non-linux environments.
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7fd705..7e7aaa5463683 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a8907ba..d5c1085eeaecd 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4a20c9..ef72fbb3a0163 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. 
A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { - var wakeUp bool c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { if !f() { // f wasn't successful - c.mu.Unlock() return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - c.trfChan.Store(make(chan struct{})) + ch := make(chan struct{}) + c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. 
-func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: return nil, errors.New("transport closed by client") } } } +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. +func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return false, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. 
- ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possible HTTP2 frame size. + // Every dataFrame has two buffers; h that keeps grpc-message header and data + // that is the actual message. As an optimization to keep wire traffic low, data + // from data is copied to h to make as big as the maximum possible HTTP2 frame + // size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. 
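The controlBuffer rewrite above moves trfChan from an atomic.Value holding a channel to an atomic.Pointer[chan struct{}] that is published once, then swapped to nil and closed exactly once. A self-contained sketch of that swap-and-close pattern under hypothetical names (throttler, engage, release, wait are not gRPC identifiers):

package main

import (
	"fmt"
	"sync/atomic"
)

// throttler holds a *chan struct{} in an atomic.Pointer so waiters never race
// with the goroutine that closes the channel.
type throttler struct {
	ch atomic.Pointer[chan struct{}]
}

func (t *throttler) engage() {
	ch := make(chan struct{})
	t.ch.Store(&ch) // publish a fresh, open channel
}

func (t *throttler) release() {
	if ch := t.ch.Swap(nil); ch != nil {
		close(*ch) // Swap guarantees exactly one goroutine closes it
	}
}

func (t *throttler) wait() {
	if ch := t.ch.Load(); ch != nil {
		<-(*ch) // block until release() closes the channel
	}
}

func main() {
	var t throttler
	t.engage()
	go t.release()
	t.wait()
	fmt.Println("unblocked")
}

The Swap-then-close sequence is what lets finish() and getOnceLocked() in the hunks above share responsibility for closing the throttling channel without double-close panics.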
@@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - var ( - buf []byte - ) + // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames. - // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h } else { - buf = dataItem.d - } + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. 
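The quota arithmetic above reduces to a few lines: the frame is capped by the smallest of the HTTP/2 max frame length, the stream-level quota, and the connection-level quota, and header bytes are packed before message bytes. This worked example reuses the http2MaxFrameLen value from the diff but invents the quota figures:

```go
// Worked example of the frame-size arithmetic in processData. Variable names
// shadow the ones above; the quota values are made up for illustration.
package main

import "fmt"

const http2MaxFrameLen = 16384

func frameSizes(hLen, dataRemaining, streamQuota, connQuota int) (hSize, dSize int) {
	maxSize := min(http2MaxFrameLen, streamQuota, connQuota)
	hSize = min(maxSize, hLen)                // grpc-message header bytes first
	dSize = min(maxSize-hSize, dataRemaining) // then fill with message bytes
	return hSize, dSize
}

func main() {
	// 5-byte gRPC message header, 40000 message bytes, generous quotas:
	h, d := frameSizes(5, 40000, 65535, 65535)
	fmt.Println(h, d) // 5 16379 -> one maximally packed 16384-byte frame
}
```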
+ _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce29a4e7..ce878693bd741 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. 
+ data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c706986da..62b81885d8ef7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -83,9 +86,9 @@ type http2Client struct { writerDone chan struct{} // sync point to enable testing. // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer + goAway chan struct{} + keepaliveDone chan struct{} // Closed when the keepalive goroutine exits. + framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. // Do not access controlBuf with mu held. 
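The reader goroutine above follows a pool-get / read / hand-off-or-return cycle: a pooled buffer either becomes a mem.Buffer owned by the consumer or goes straight back to the pool when nothing was read. A self-contained sketch of that cycle, using only the mem.BufferPool and mem.Buffer APIs introduced later in this patch (pump and deliver are hypothetical names):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"google.golang.org/grpc/mem"
)

// pump reads r in http2MaxFrameLen-sized chunks from pooled buffers. On a
// successful read, ownership of the buffer moves to the consumer via
// mem.NewBuffer; otherwise the buffer is returned to the pool immediately.
func pump(r io.Reader, pool mem.BufferPool, deliver func(mem.Buffer)) error {
	for {
		buf := pool.Get(16384)
		n, err := r.Read(*buf)
		if n > 0 {
			*buf = (*buf)[:n]
			deliver(mem.NewBuffer(buf, pool)) // consumer's Free returns it to the pool
		} else {
			pool.Put(buf)
		}
		if err != nil {
			return err
		}
	}
}

func main() {
	err := pump(strings.NewReader("hello"), mem.DefaultBufferPool(), func(b mem.Buffer) {
		fmt.Printf("%s\n", b.ReadOnlyData())
		b.Free()
	})
	fmt.Println(err) // EOF once the data is drained
}
```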
@@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -332,6 +335,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), + keepaliveDone: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, @@ -346,7 +350,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +467,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -504,7 +508,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -525,8 +528,9 @@ func (t *http2Client) getPeer() *peer.Peer { // to be the last frame loopy writes to the transport. func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { t.mu.Lock() - defer t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + maxStreamID := t.nextID - 2 + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil { return false, err } return false, g.closeConn @@ -770,7 +774,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -983,6 +987,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. 
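Note how outgoingGoAwayHandler above now snapshots t.nextID under the mutex and releases it before writing the frame, so a slow or blocked network write can no longer stall other lock holders. A generic sketch of that lock-narrowing pattern, with illustrative types:

```go
// Sketch: copy shared state under the mutex, unlock, then do the slow I/O.
package main

import (
	"fmt"
	"sync"
)

type conn struct {
	mu     sync.Mutex
	nextID uint32
}

func (c *conn) writeGoAway(write func(maxStreamID uint32) error) error {
	c.mu.Lock()
	maxStreamID := c.nextID - 2 // snapshot under the lock
	c.mu.Unlock()
	// The write may block on the network; holding c.mu across it would
	// stall every other code path that needs the lock.
	return write(maxStreamID)
}

func main() {
	c := &conn{nextID: 7}
	c.writeGoAway(func(id uint32) error {
		fmt.Println("GOAWAY, last stream:", id)
		return nil
	})
}
```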
if t.state == closing { @@ -1005,18 +1010,33 @@ func (t *http2Client) Close(err error) { // should unblock it so that the goroutine eventually exits. t.kpDormancyCond.Signal() } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + goAwayDebugMessage := t.goAwayDebugMessage t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() + // Waits for the reader and keepalive goroutines to exit before returning to + // ensure all resources are cleaned up before Close can return. + <-t.readerDone + if t.keepaliveEnabled { + <-t.keepaliveDone + } channelz.RemoveEntry(t.channelz.ID) - // Append info about previous goaways if there were any, since this may be important - // for understanding the root cause for this connection to be closed. - _, goAwayDebugMessage := t.GetGoAwayReason() - var st *status.Status if len(goAwayDebugMessage) > 0 { st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) @@ -1065,27 +1085,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1190,10 +1219,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. 
Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1222,7 +1254,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1291,11 +1323,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(pingAck) } -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return + return nil } if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug @@ -1307,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) - return + return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id) } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first @@ -1325,8 +1356,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return + return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID) } default: t.setGoAwayReason(f) @@ -1350,8 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.prevGoAwayID = id if len(t.activeStreams) == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return + return connectionErrorf(true, nil, "received goaway and there are no active streams") } streamsToClose := make([]*Stream, 0) @@ -1368,6 +1397,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for _, stream := range streamsToClose { t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } + return nil } // setGoAwayReason sets the value of t.goAwayReason based @@ -1603,7 +1633,13 @@ func (t *http2Client) readServerPreface() error { // network connection. If the server preface is not read successfully, an // error is pushed to errCh; otherwise errCh is closed with no error. func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) + var errClose error + defer func() { + close(t.readerDone) + if errClose != nil { + t.Close(errClose) + } + }() if err := t.readServerPreface(); err != nil { errCh <- err @@ -1642,11 +1678,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. 
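The Close path shown earlier now bounds its wait for the loopy writer with goAwayLoopyWriterTimeout, so a blackholed TCP connection cannot stall shutdown indefinitely while the GOAWAY is flushed. Its timer-plus-select idiom, reduced to a standalone sketch:

```go
// Sketch of the bounded wait added to Close: give the writer a chance to
// flush the GOAWAY, but give up after a timeout.
package main

import (
	"fmt"
	"time"
)

func waitWithTimeout(done <-chan struct{}, timeout time.Duration) bool {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-done:
		return true // writer finished in time
	case <-timer.C:
		return false // give up and close the connection anyway
	}
}

func main() {
	writerDone := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond) // stands in for loopy flushing the GOAWAY
		close(writerDone)
	}()
	fmt.Println(waitWithTimeout(writerDone, 5*time.Second)) // true: flushed in time
}
```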
- t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + errClose = connectionErrorf(true, err, "error reading from server: %v", err) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1660,7 +1695,7 @@ func (t *http2Client) reader(errCh chan<- error) { case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: - t.handleGoAway(frame) + errClose = t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: @@ -1671,15 +1706,15 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { + var err error + defer func() { + close(t.keepaliveDone) + if err != nil { + t.Close(err) + } + }() p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. outstandingPing := false @@ -1703,7 +1738,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout") return } t.mu.Lock() @@ -1745,7 +1780,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index b7091165b5013..584b50fe55302 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. 
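Both the reader and keepalive goroutines were restructured above so that frame handlers return fatal errors instead of calling Close themselves, and each goroutine closes the transport exactly once in a defer, after its done-channel bookkeeping. A compressed sketch of that single-closing-site pattern (toy transport type, not grpc-go's):

```go
package main

import (
	"errors"
	"fmt"
)

type transport struct{}

func (t *transport) close(err error) { fmt.Println("closing transport:", err) }

// handleFrame reports fatal conditions rather than closing the transport.
func (t *transport) handleFrame(frame string) error {
	if frame == "bad-goaway" {
		return errors.New("received goaway with non-zero even-numbered stream id")
	}
	return nil
}

func (t *transport) reader(frames []string) {
	var errClose error
	defer func() {
		// readerDone bookkeeping would happen here, then the single close.
		if errClose != nil {
			t.close(errClose)
		}
	}()
	for _, f := range frames {
		if errClose = t.handleFrame(f); errClose != nil {
			return
		}
	}
}

func main() {
	(&transport{}).reader([]string{"settings", "bad-goaway"})
}
```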
channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: @@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. 
This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd442eb..3613d7b64817d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { @@ -389,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa1032574cb..54b2244365444 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. 
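The rewritten bufWriter.Write above batches bytes into a fixed-size buffer and flushes whenever it fills, reporting the bytes written so far if a flush fails. A runnable reduction of that loop, writing to a bytes.Buffer instead of a net.Conn:

```go
package main

import (
	"bytes"
	"fmt"
)

type batchWriter struct {
	dst       *bytes.Buffer // stands in for the net.Conn
	buf       []byte
	offset    int
	batchSize int
}

func (w *batchWriter) flush() error {
	_, err := w.dst.Write(w.buf[:w.offset])
	w.offset = 0
	return err
}

func (w *batchWriter) Write(b []byte) (int, error) {
	written := 0
	for len(b) > 0 {
		copied := copy(w.buf[w.offset:], b)
		b = b[copied:]
		written += copied
		w.offset += copied
		if w.offset < w.batchSize {
			continue
		}
		if err := w.flush(); err != nil {
			return written, err
		}
	}
	return written, nil
}

func main() {
	w := &batchWriter{dst: &bytes.Buffer{}, buf: make([]byte, 4), batchSize: 4}
	n, err := w.Write([]byte("hello world"))
	fmt.Println(n, err, w.dst.String()) // 11 <nil> "hello wo" (3 bytes still buffered)
}
```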
Checks if a proxy diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 4b39c0ade97c0..e12cb0bc914bd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,7 +22,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -47,32 +47,10 @@ import ( const logLevel = 2 -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -148,45 +129,97 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. + last mem.Buffer // Stores the remaining data in the previous calls. err error - freeBuffer func(*bytes.Buffer) } -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - // Read remaining data left in last call. - copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. If last is drained, it tries to read +// additional data from recv. It blocks if there no additional data available in +// recv. If Read returns any non-nil error, it will continue to return that +// error. 
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) + } +} + +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readHeaderAdditional(m, header) + case m := <-r.recv.get(): + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. 
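The cancellation handling above deliberately routes the context error through the receive buffer (via closeStream) instead of returning it immediately, so a trailer that raced with cancellation is still observed in order. A deliberately simplified, single-goroutine sketch of that ordering:

```go
// Simplified sketch of the cancel-vs-recv race handling: close the stream
// first, which enqueues the final message, then drain the buffer in order.
package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	recv := make(chan string, 1)

	// closeStream pushes its error into the receive buffer as the final
	// message, mirroring how closeStream writes a recvMsg in the real code.
	closeStream := func(err error) {
		recv <- "final message: " + err.Error()
	}

	cancel()
	select {
	case <-ctx.Done():
		closeStream(ctx.Err())
		fmt.Println(<-recv) // final message: context canceled
	case m := <-recv:
		fmt.Println(m)
	}
}
```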
r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readAdditional(m, n) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readAdditional(m, n) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -241,7 +289,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,7 +299,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota @@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -499,36 +547,96 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +// ReadHeader reads data into the provided header slice from the stream. It +// first checks if there was an error during a previous read operation and +// returns it if present. It then requests a read operation for the length of +// the header. It continues to read from the stream until the entire header +// slice is filled or an error occurs. If an `io.EOF` error is encountered +// with partially read data, it is converted to `io.ErrUnexpectedEOF` to +// indicate an unexpected end of the stream. The method returns any error +// encountered during the read process or nil if the header was successfully +// read. +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. 
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err } t.windowHandler(n) - return + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err + } + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -574,6 +682,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,6 +721,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -673,7 +784,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -725,7 +836,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -798,7 +909,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
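Stream.Read now returns a mem.BufferSlice instead of filling a caller-provided []byte. A small demonstration of the BufferSlice surface defined later in this patch (Len, Materialize, Free), with made-up contents:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	var data mem.BufferSlice
	data = append(data, mem.SliceBuffer([]byte("hello ")))
	data = append(data, mem.SliceBuffer([]byte("world")))
	// Built-in len counts buffers; BufferSlice.Len sums their lengths.
	fmt.Println(len(data), data.Len())     // 2 11
	fmt.Printf("%s\n", data.Materialize()) // hello world
	data.Free()                            // noop for SliceBuffer, required in general
}
```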
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7d311..eb42b19fb99a1 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 0000000000000..c37c58c0233ec --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. 
+ Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. 
When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 0000000000000..228e9c2f20f26 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. 
+func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. Has the same semantics as the copy +// builtin in that it will copy as many bytes as it can, stopping when either dst +// is full or s runs out of data, returning the minimum of s.Len() and len(dst). +func (s BufferSlice) CopyTo(dst []byte) int { + off := 0 + for _, b := range s { + off += copy(dst[off:], b.ReadOnlyData()) + } + return off +} + +// Materialize concatenates all the underlying Buffer's data into a single +// contiguous buffer using CopyTo. +func (s BufferSlice) Materialize() []byte { + l := s.Len() + if l == 0 { + return nil + } + out := make([]byte, l) + s.CopyTo(out) + return out +} + +// MaterializeToBuffer functions like Materialize except that it writes the data +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. Freeing this +// buffer won't release it until the BufferSlice is itself released. +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { + if len(s) == 1 { + s[0].Ref() + return s[0] + } + sLen := s.Len() + if sLen == 0 { + return emptyBuffer{} + } + buf := pool.Get(sLen) + s.CopyTo(*buf) + return NewBuffer(buf, pool) +} + +// Reader returns a new Reader for the input slice after taking references to +// each underlying buffer. +func (s BufferSlice) Reader() Reader { + s.Ref() + return &sliceReader{ + data: s, + len: s.Len(), + } +} + +// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface +// with other parts systems. It also provides an additional convenience method +// Remaining(), which returns the number of unread bytes remaining in the slice. +// Buffers will be freed as they are read. +type Reader interface { + io.Reader + io.ByteReader + // Close frees the underlying BufferSlice and never returns an error. Subsequent + // calls to Read will return (0, io.EOF). + Close() error + // Remaining returns the number of unread bytes remaining in the slice. + Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). + bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice. 
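A quick usage sketch of the pool and reader APIs defined above; the pool tier sizes and payloads are arbitrary:

```go
package main

import (
	"fmt"
	"io"

	"google.golang.org/grpc/mem"
)

func main() {
	// A 1000-byte request is served by the 4KB tier, trimmed to length 1000.
	pool := mem.NewTieredBufferPool(256, 4<<10)
	buf := pool.Get(1000)
	fmt.Println(len(*buf), cap(*buf) >= 1000) // 1000 true
	pool.Put(buf)

	// Reader exposes a BufferSlice as an io.Reader, freeing buffers as read.
	s := mem.BufferSlice{mem.SliceBuffer([]byte("abc")), mem.SliceBuffer([]byte("def"))}
	r := s.Reader()
	out, _ := io.ReadAll(r)
	fmt.Println(string(out), r.Remaining()) // abcdef 0
	r.Close()
}
```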
+ r.freeFirstBufferIfEmpty() + } + + return n, nil +} + +func (r *sliceReader) ReadByte() (byte, error) { + if r.len == 0 { + return 0, io.EOF + } + + // There may be any number of empty buffers in the slice, clear them all until a + // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. + for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 0000000000000..ecbf0b9a73ea3 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,268 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. 
+ Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +// IsBelowBufferPoolingThreshold returns true if the given size is less than or +// equal to the threshold for buffer pooling. This is used to determine whether +// to pool buffers or allocate them directly. +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + // Use the buffer's capacity instead of the length, otherwise buffers may + // not be reused under certain conditions. For example, if a large buffer + // is acquired from the pool, but fewer bytes than the buffering threshold + // are written to it, the buffer will not be returned to the pool. + if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. 
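+//
+// An illustrative sketch of typical use from a caller's perspective (consume
+// is a hypothetical caller-side function; DefaultBufferPool is used here only
+// as an example pool):
+//
+//	buf := mem.Copy(data, mem.DefaultBufferPool())
+//	defer buf.Free() // release this reference when done
+//	consume(buf.ReadOnlyData())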
+func Copy(data []byte, pool BufferPool) Buffer {
+	if IsBelowBufferPoolingThreshold(len(data)) {
+		buf := make(SliceBuffer, len(data))
+		copy(buf, data)
+		return buf
+	}
+
+	buf := pool.Get(len(data))
+	copy(*buf, data)
+	return NewBuffer(buf, pool)
+}
+
+func (b *buffer) ReadOnlyData() []byte {
+	if b.refs == nil {
+		panic("Cannot read freed buffer")
+	}
+	return b.data
+}
+
+func (b *buffer) Ref() {
+	if b.refs == nil {
+		panic("Cannot ref freed buffer")
+	}
+	b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+	if b.refs == nil {
+		panic("Cannot free freed buffer")
+	}
+
+	refs := b.refs.Add(-1)
+	switch {
+	case refs > 0:
+		return
+	case refs == 0:
+		if b.pool != nil {
+			b.pool.Put(b.origData)
+		}
+
+		refObjectPool.Put(b.refs)
+		b.origData = nil
+		b.data = nil
+		b.refs = nil
+		b.pool = nil
+		bufferObjectPool.Put(b)
+	default:
+		panic("Cannot free freed buffer")
+	}
+}
+
+func (b *buffer) Len() int {
+	return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+	if b.refs == nil {
+		panic("Cannot split freed buffer")
+	}
+
+	b.refs.Add(1)
+	split := newBuffer()
+	split.origData = b.origData
+	split.data = b.data[n:]
+	split.refs = b.refs
+	split.pool = b.pool
+
+	b.data = b.data[:n]
+
+	return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+	if b.refs == nil {
+		panic("Cannot read freed buffer")
+	}
+
+	n := copy(buf, b.data)
+	if n == len(b.data) {
+		b.Free()
+		return n, nil
+	}
+
+	b.data = b.data[n:]
+	return n, b
+}
+
+func (b *buffer) String() string {
+	return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
+// ReadUnsafe reads bytes from the given Buffer into the provided slice.
+// It does not perform safety checks.
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+	return buf.read(dst)
+}
+
+// SplitUnsafe modifies the given Buffer to point to the first n bytes while it
+// returns a new reference to the remaining bytes. The returned Buffer
+// functions just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+	return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+	return nil
+}
+
+func (e emptyBuffer) Ref()  {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+	return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+	return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+	return 0, e
+}
+
+// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
+// methods for reading, splitting, and managing the byte slice.
+type SliceBuffer []byte
+
+// ReadOnlyData returns the byte slice.
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+
+// Ref is a noop implementation of Ref.
+func (s SliceBuffer) Ref() {}
+
+// Free is a noop implementation of Free.
+func (s SliceBuffer) Free() {}
+
+// Len returns the length of the underlying byte slice.
+func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 1e9485fd6e268..d2e15253bbfbc 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Keys are matched in a case insensitive // manner. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func ValueFromIncomingContext(ctx context.Context, key string) []string { md, ok := ctx.Value(mdIncomingKey{}).(MD) if !ok { @@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee + // Case insensitive comparison: MD is a map, and there's no guarantee // that the MD attached to the context is created using our helper // functions. if strings.EqualFold(k, key) { diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 73bd63364335e..e87a17f36a50b 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -20,6 +20,7 @@ package grpc import ( "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -31,9 +32,10 @@ import ( // later release. type PreparedMsg struct { // Struct for preparing msg before sending them - encodedData []byte + encodedData mem.BufferSlice hdr []byte - payload []byte + payload mem.BufferSlice + pf payloadFormat } // Encode marshalls and compresses the message using the codec and compressor for the stream. @@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if err != nil { return err } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. + var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) if err != nil { return err } - p.hdr, p.payload = msgHeader(data, compData) + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + return nil } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296c224c..0000000000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) - -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). 
-OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index f2efa2a2cb5a9..09e864a89d35b 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) r.mu.Lock() defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. + r.BuildCallback(target, cc, opts) r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c5fb45236faf6..23bb3fb258240 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. 
func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e91510..aba1ae3e6784c 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,24 +264,20 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. 
Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. The error passed to the callback is the status of the @@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. 
The result of calling Name() will @@ -515,10 +511,50 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -540,10 +576,10 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. 
@@ -608,14 +653,15 @@ type parser struct {
 // - an error from the status package
 //
 // No other error values or types must be returned, which also means
-// that the underlying io.Reader must not return an incompatible
+// that the underlying streamReader must not return an incompatible
 // error.
-func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
-	if _, err := p.r.Read(p.header[:]); err != nil {
+func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
+	err := p.r.ReadHeader(p.header[:])
+	if err != nil {
 		return 0, nil, err
 	}
 
-	pf = payloadFormat(p.header[0])
+	pf := payloadFormat(p.header[0])
 	length := binary.BigEndian.Uint32(p.header[1:])
 
 	if length == 0 {
@@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
 	if int(length) > maxReceiveMessageSize {
 		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
 	}
-	msg = p.recvBufferPool.Get(int(length))
-	if _, err := p.r.Read(msg); err != nil {
+
+	data, err := p.r.Read(int(length))
+	if err != nil {
 		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		}
 		return 0, nil, err
 	}
-	return pf, msg, nil
+	return pf, data, nil
 }
 
 // encode serializes msg and returns a buffer containing the message, or an
 // error if it is too large to be transmitted by grpc. If msg is nil, it
 // generates an empty message.
-func encode(c baseCodec, msg any) ([]byte, error) {
+func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
 	if msg == nil { // NOTE: typed nils will not be caught by this check
 		return nil, nil
 	}
@@ -648,7 +695,8 @@
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
 	}
-	if uint(len(b)) > math.MaxUint32 {
-		return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+	if bLen := b.Len(); uint(bLen) > math.MaxUint32 {
+		b.Free()
+		return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", bLen)
 	}
 	return b, nil
@@ -659,34 +707,41 @@
 // indicating no compression was done.
 //
 // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
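+//
+// Illustrative sketch of the send-path sequence these helpers form (error
+// handling elided; mirrors the usage in server.go's sendResponse and
+// stream.go's SendMsg):
+//
+//	data, _ := encode(codec, msg)                     // marshal into a BufferSlice
+//	compData, pf, _ := compress(data, cp, comp, pool) // optionally compress
+//	hdr, payload := msgHeader(data, compData, pf)     // build the 5-byte header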
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
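+// The header layout is the gRPC wire format's 5-byte message prefix: a 1-byte
+// compressed-flag (the payloadFormat) followed by the payload length as a
+// 4-byte big-endian integer:
+//
+//	+---------+-------------------+-----------------+
+//	| 1 byte  | 4 bytes           | length bytes    |
+//	| pf flag | big-endian length | message payload |
+//	+---------+-------------------+-----------------+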
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
 	hdr = make([]byte, headerLen)
-	if compData != nil {
-		hdr[0] = byte(compressionMade)
-		data = compData
+	hdr[0] = byte(pf)
+
+	var length uint32
+	if pf.isCompressed() {
+		length = uint32(compData.Len())
+		payload = compData
 	} else {
-		hdr[0] = byte(compressionNone)
+		length = uint32(data.Len())
+		payload = data
 	}
 
 	// Write length of payload into buf
-	binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
-	return hdr, data
+	binary.BigEndian.PutUint32(hdr[payloadLen:], length)
+	return hdr, payload
 }
 
-func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload {
+func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
 	return &stats.OutPayload{
 		Client:           client,
 		Payload:          msg,
-		Data:             data,
-		Length:           len(data),
-		WireLength:       len(payload) + headerLen,
-		CompressedLength: len(payload),
+		Length:           dataLength,
+		WireLength:       payloadLength + headerLen,
+		CompressedLength: payloadLength,
 		SentTime:         t,
 	}
 }
 
-func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
 	switch pf {
 	case compressionNone:
 	case compressionMade:
@@ -731,7 +789,10 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
 			return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
 		}
 		if !haveCompressor {
-			return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+			if isServer {
+				return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+			}
+			return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
 		}
 	default:
 		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
@@ -741,104 +802,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
 
 type payloadInfo struct {
 	compressedLength  int // The compressed length got from wire.
-	uncompressedBytes []byte
+	uncompressedBytes mem.BufferSlice
+}
+
+func (p *payloadInfo) free() {
+	if p != nil && p.uncompressedBytes != nil {
+		p.uncompressedBytes.Free()
+	}
 }
 
 // recvAndDecompress reads a message from the stream, decompressing it if necessary.
 //
-// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as
-// the buffer is no longer needed.
-func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor,
-) (uncompressedBuf []byte, cancel func(), err error) {
-	pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize)
+// The caller must free the returned BufferSlice (via BufferSlice.Free) as soon
+// as it is no longer needed; freeing it releases the buffers back to the pool.
+// TODO: Refactor this function to reduce the number of arguments.
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
- // - // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // we can also utilize the recv buffer pool here. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err + return out, out.Len(), nil } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - defer cancel() - if err := c.Unmarshal(buf, m); err != nil { + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -941,7 +1027,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { + if ec, ok := c.codec.(encoding.CodecV2); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -950,12 +1036,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
- c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 89f8e4792bf15..d1e1415a40f9b 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -80,7 +81,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool + internal.BufferPool = bufferPool } var statusOK = status.New(codes.OK, "") @@ -170,7 +171,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool + bufferPool mem.BufferPool waitForHandlers bool } @@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, + bufferPool: mem.DefaultBufferPool(), } var globalServerOptions []ServerOption @@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) }) } @@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption { }) } -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { +func bufferPool(bufferPool mem.BufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool + o.bufferPool = bufferPool }) } @@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorkers blocks on a *transport.Stream channel forever and waits for +// serverWorker blocks on a *transport.Stream channel forever and waits for // data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). @@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - compData, err := compress(data, cp, comp) + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) if err != nil { + data.Free() channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - hdr, payload := msgHeader(data, compData) + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } } } return err @@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } + defer d.Free() if channelz.IsOn() { t.IncrMsgRecv() } df := func(v any) error { - defer cancel() - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } + for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: len(d), + Length: d.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d, + Message: d.Materialize(), } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx: ctx, t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } return codec } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe8e256..0000000000000 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. -type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. 
-type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index fdb0bd65182c5..71195c4943d7a 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -77,9 +77,6 @@ type InPayload struct { // the call to HandleRPC which provides the InPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). @@ -150,9 +147,6 @@ type OutPayload struct { // the call to HandleRPC which provides the OutPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). Length int diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 8051ef5b514a3..bb2b2a216ce24 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/internal/serviceconfig" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs.attempt = a return nil } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { return nil, err } @@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error { } a.s = s a.ctx = s.Context() - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -566,10 +567,15 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer + committed bool // active attempt committed for retry? + onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() } // csAttempt implements a single transport stream attempt within a @@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() { cs.onCommit() } cs.committed = true - cs.buffer = nil + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil } func (cs *clientStream) commitAttempt() { @@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { // the stream is canceled. 
return err } - // Note that the first op in the replay buffer always sets cs.attempt + // Note that the first op in replayBuffer always sets cs.attempt // if it is able to pick a transport and create a stream. if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil @@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } - if len(cs.buffer) == 0 { + if len(cs.replayBuffer) == 0 { // For the first op, which controls creation of the stream and // assigns cs.attempt, we need to create a new attempt inline // before executing the first op. On subsequent ops, the attempt @@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.buffer { - if err := f(attempt); err != nil { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { // Note: we still will buffer if retry is disabled (for transparent retries). if cs.committed { return } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() + cleanup() return } - cs.buffer = append(cs.buffer, op) + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) } func (cs *clientStream) SendMsg(m any) (err error) { @@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. 
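+	// Either way, the extra reference is released exactly once: the cleanup
+	// passed to bufferForRetryLocked frees payload when the attempt commits (or
+	// immediately, if the replay buffer exceeds its size limit), while the
+	// payload.Free() below covers the case where the op was never buffered.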
+ onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data, + Message: data.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, + Message: recvInfo.uncompressedBytes.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } return io.EOF } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } if channelz.IsOn() { a.t.IncrMsgSent() @@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr @@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } for _, sh := range a.statsHandlers { sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, + Client: true, + RecvTime: time.Now(), + Payload: m, WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Length: payInfo.uncompressedBytes.Len(), }) } if channelz.IsOn() { @@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. 
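With the deprecated `Data []byte` field removed from `stats.InPayload` and `stats.OutPayload` (stats.go hunk at the top of this diff), stats handlers can no longer see the serialized bytes and have to work from the size fields, which this diff now fills from `payInfo.uncompressedBytes.Len()`. A minimal, hypothetical `stats.Handler` written against that reduced surface:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// sizeLogger reads only the length fields, which survive the removal
// of the deprecated Data field.
type sizeLogger struct{}

func (sizeLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (sizeLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	if in, ok := s.(*stats.InPayload); ok {
		log.Printf("recv: %d bytes uncompressed, %d on the wire", in.Length, in.WireLength)
	}
}

func (sizeLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

func (sizeLogger) HandleConn(context.Context, stats.ConnStats) {}

A handler like this would be installed with the long-standing `grpc.WithStatsHandler(sizeLogger{})` dial option; any handler still reading `Data` fails to compile against v1.68.0.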
// This recv expects EOF or errors, so we don't collect inPayload. - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (a *csAttempt) finish(err error) { @@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newClientStream creates a ClientStream with the specified transport, on the +// newNonRetryClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. 
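Both RecvMsg rewrites above flip the old error handling into a clearer shape: on the follow-up read of a non-server-streaming RPC, io.EOF is the success case (return the stream's status), any other error becomes an RPC error, and a nil error means the server illegally sent a second message. The same control flow distilled into a standalone helper (hypothetical, for illustration only):

package main

import (
	"errors"
	"fmt"
	"io"
)

// expectEOF mirrors the restructured branches above: the extra read on
// a unary-response stream must terminate it.
func expectEOF(read func() error, statusErr error) error {
	if err := read(); err == io.EOF {
		return statusErr // clean end of stream: surface the RPC status
	} else if err != nil {
		return err // transport or decode failure
	}
	return errors.New("protocol violation: extra response message")
}

func main() {
	fmt.Println(expectEOF(func() error { return io.EOF }, nil)) // <nil>
	fmt.Println(expectEOF(func() error { return nil }, nil))    // protocol violation
}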
as.decompSet = true } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (as *addrConnStream) finish(err error) { @@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() @@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } } sm := &binarylog.ServerMessage{ - Message: data, + Message: data.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, sm) @@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } } return nil @@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, }) @@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, + Message: payInfo.uncompressedBytes.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, cm) @@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil } // The input interface is not a prepared msg. 
// Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } - compData, err := compress(data, cp, comp) + compData, pf, err := compress(data, cp, comp, pool) if err != nil { - return nil, nil, nil, err + data.Free() + return nil, nil, nil, 0, err } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go index 8b813529c0cc5..0037fee0bd71a 100644 --- a/vendor/google.golang.org/grpc/stream_interfaces.go +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -22,15 +22,35 @@ package grpc // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } // ServerStreamingServer represents the server side of a server-streaming (one // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface { // message stream and the type of the unary response message. It is used in // generated code. type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. 
ClientStream } @@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface { // requests, one response) RPC. It is generic over both the type of the request // message stream and the type of the unary response message. It is used in // generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface { // request message stream and the type of the response message stream. It is // used in generated code. type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. ClientStream } @@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface { // (many requests, many responses) RPC. It is generic over both the type of the // request message stream and the type of the response message stream. It is // used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // Send sends a response message to the client. 
The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bafaef99be989..d50e843598c30 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.65.0" +const Version = "1.68.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index 1502d1f6e62fd..b87edec8c29e9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -201,13 +201,11 @@ github.com/gogo/protobuf/protoc-gen-gogo/plugin github.com/gogo/protobuf/sortkeys github.com/gogo/protobuf/vanity github.com/gogo/protobuf/vanity/command -# github.com/golang-jwt/jwt/v4 v4.5.0 +# github.com/golang-jwt/jwt/v4 v4.5.1 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 # github.com/golang/protobuf v1.5.4 ## explicit; go 1.17 -github.com/golang/protobuf/descriptor -github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/protoc-gen-go/descriptor github.com/golang/protobuf/ptypes @@ -215,8 +213,8 @@ github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -# github.com/google/btree v1.0.1 -## explicit; go 1.12 +# github.com/google/btree v1.1.3 +## explicit; go 1.18 github.com/google/btree # github.com/google/cadvisor v0.51.0 ## explicit; go 1.21 @@ -318,20 +316,13 @@ github.com/gorilla/websocket # github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 ## explicit github.com/gregjones/httpcache -# github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 -## explicit; go 1.14 -github.com/grpc-ecosystem/go-grpc-middleware # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus -# github.com/grpc-ecosystem/grpc-gateway v1.16.0 -## explicit; go 1.14 -github.com/grpc-ecosystem/grpc-gateway/internal -github.com/grpc-ecosystem/grpc-gateway/runtime -github.com/grpc-ecosystem/grpc-gateway/utilities -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 -## explicit; go 1.20 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 +## explicit; go 1.21 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule +github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities # github.com/inconshreveable/mousetrap v1.1.0 @@ -352,10 +343,22 @@ github.com/json-iterator/go # github.com/karrick/godirwalk v1.17.0 ## explicit; go 1.13 github.com/karrick/godirwalk +# github.com/klauspost/compress v1.17.9 +## explicit; go 1.20 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/kr/pretty v0.3.1 ## explicit; go 1.12 # github.com/kr/text v0.2.0 ## explicit +# github.com/kylelemons/godebug v1.1.0 
+## explicit; go 1.11 +github.com/kylelemons/godebug/diff # github.com/libopenstorage/openstorage v1.0.0 ## explicit github.com/libopenstorage/openstorage/api @@ -493,8 +496,10 @@ github.com/pmezard/go-difflib/difflib ## explicit; go 1.16 github.com/pquerna/cachecontrol github.com/pquerna/cachecontrol/cacheobject -# github.com/prometheus/client_golang v1.19.1 +# github.com/prometheus/client_golang v1.20.5 ## explicit; go 1.20 +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal @@ -505,8 +510,8 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.55.0 -## explicit; go 1.20 +# github.com/prometheus/common v0.60.1 +## explicit; go 1.21 github.com/prometheus/common/expfmt github.com/prometheus/common/model # github.com/prometheus/procfs v0.15.1 @@ -517,8 +522,8 @@ github.com/prometheus/procfs/internal/util # github.com/robfig/cron/v3 v3.0.1 ## explicit; go 1.12 github.com/robfig/cron/v3 -# github.com/rogpeppe/go-internal v1.12.0 -## explicit; go 1.20 +# github.com/rogpeppe/go-internal v1.13.1 +## explicit; go 1.22 # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 @@ -565,11 +570,14 @@ github.com/xiang90/probing # github.com/xlab/treeprint v1.2.0 ## explicit; go 1.13 github.com/xlab/treeprint -# go.etcd.io/bbolt v1.3.11 -## explicit; go 1.22 +# go.etcd.io/bbolt v1.4.0-beta.0 +## explicit; go 1.23 go.etcd.io/bbolt -# go.etcd.io/etcd/api/v3 v3.5.16 -## explicit; go 1.22 +go.etcd.io/bbolt/errors +go.etcd.io/bbolt/internal/common +go.etcd.io/bbolt/internal/freelist +# go.etcd.io/etcd/api/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd +## explicit; go 1.23 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb go.etcd.io/etcd/api/v3/etcdserverpb/gw @@ -577,8 +585,9 @@ go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.16 -## explicit; go 1.22 +go.etcd.io/etcd/api/v3/versionpb +# go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd +## explicit; go 1.23 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil go.etcd.io/etcd/client/pkg/v3/pathutil @@ -587,65 +596,58 @@ go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v2 v2.305.16 -## explicit; go 1.22 +go.etcd.io/etcd/client/pkg/v3/verify +# go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 +## explicit; go 1.17 go.etcd.io/etcd/client/v2 -# go.etcd.io/etcd/client/v3 v3.5.16 -## explicit; go 1.22 +# go.etcd.io/etcd/client/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd +## explicit; go 1.23 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/concurrency go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint go.etcd.io/etcd/client/v3/internal/resolver go.etcd.io/etcd/client/v3/kubernetes -# go.etcd.io/etcd/pkg/v3 v3.5.16 -## explicit; go 1.22 +# go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd +## explicit; go 1.23 go.etcd.io/etcd/pkg/v3/adt 
go.etcd.io/etcd/pkg/v3/contention go.etcd.io/etcd/pkg/v3/cpuutil go.etcd.io/etcd/pkg/v3/crc go.etcd.io/etcd/pkg/v3/debugutil +go.etcd.io/etcd/pkg/v3/featuregate go.etcd.io/etcd/pkg/v3/flags go.etcd.io/etcd/pkg/v3/httputil go.etcd.io/etcd/pkg/v3/idutil go.etcd.io/etcd/pkg/v3/ioutil go.etcd.io/etcd/pkg/v3/netutil +go.etcd.io/etcd/pkg/v3/notify go.etcd.io/etcd/pkg/v3/pbutil go.etcd.io/etcd/pkg/v3/runtime go.etcd.io/etcd/pkg/v3/schedule go.etcd.io/etcd/pkg/v3/traceutil go.etcd.io/etcd/pkg/v3/wait -# go.etcd.io/etcd/raft/v3 v3.5.16 -## explicit; go 1.22 -go.etcd.io/etcd/raft/v3 -go.etcd.io/etcd/raft/v3/confchange -go.etcd.io/etcd/raft/v3/quorum -go.etcd.io/etcd/raft/v3/raftpb -go.etcd.io/etcd/raft/v3/tracker -# go.etcd.io/etcd/server/v3 v3.5.16 -## explicit; go 1.22 +# go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20241118220206-41862836f9bd +## explicit; go 1.23 go.etcd.io/etcd/server/v3/auth go.etcd.io/etcd/server/v3/config -go.etcd.io/etcd/server/v3/datadir go.etcd.io/etcd/server/v3/embed go.etcd.io/etcd/server/v3/etcdserver go.etcd.io/etcd/server/v3/etcdserver/api go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp +go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types go.etcd.io/etcd/server/v3/etcdserver/api/membership go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp go.etcd.io/etcd/server/v3/etcdserver/api/snap go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb -go.etcd.io/etcd/server/v3/etcdserver/api/v2auth go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery go.etcd.io/etcd/server/v3/etcdserver/api/v2error -go.etcd.io/etcd/server/v3/etcdserver/api/v2http -go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes go.etcd.io/etcd/server/v3/etcdserver/api/v2stats go.etcd.io/etcd/server/v3/etcdserver/api/v2store -go.etcd.io/etcd/server/v3/etcdserver/api/v2v3 go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm go.etcd.io/etcd/server/v3/etcdserver/api/v3client go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor +go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery go.etcd.io/etcd/server/v3/etcdserver/api/v3election go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw @@ -653,22 +655,36 @@ go.etcd.io/etcd/server/v3/etcdserver/api/v3lock go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc +go.etcd.io/etcd/server/v3/etcdserver/apply go.etcd.io/etcd/server/v3/etcdserver/cindex +go.etcd.io/etcd/server/v3/etcdserver/errors +go.etcd.io/etcd/server/v3/etcdserver/txn +go.etcd.io/etcd/server/v3/etcdserver/version +go.etcd.io/etcd/server/v3/features go.etcd.io/etcd/server/v3/lease go.etcd.io/etcd/server/v3/lease/leasehttp go.etcd.io/etcd/server/v3/lease/leasepb -go.etcd.io/etcd/server/v3/mvcc -go.etcd.io/etcd/server/v3/mvcc/backend -go.etcd.io/etcd/server/v3/mvcc/buckets go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter +go.etcd.io/etcd/server/v3/storage +go.etcd.io/etcd/server/v3/storage/backend +go.etcd.io/etcd/server/v3/storage/datadir +go.etcd.io/etcd/server/v3/storage/mvcc +go.etcd.io/etcd/server/v3/storage/schema +go.etcd.io/etcd/server/v3/storage/wal +go.etcd.io/etcd/server/v3/storage/wal/walpb go.etcd.io/etcd/server/v3/verify -go.etcd.io/etcd/server/v3/wal -go.etcd.io/etcd/server/v3/wal/walpb +# go.etcd.io/raft/v3 v3.6.0-alpha.0 +## explicit; go 1.21 +go.etcd.io/raft/v3 +go.etcd.io/raft/v3/confchange +go.etcd.io/raft/v3/quorum +go.etcd.io/raft/v3/raftpb +go.etcd.io/raft/v3/tracker # 
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 ## explicit; go 1.19 go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 -## explicit; go 1.21 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 +## explicit; go 1.22 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal # go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 @@ -676,8 +692,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/inte go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -695,25 +711,24 @@ go.opentelemetry.io/otel/semconv/v1.17.0/httpconv go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.26.0 -go.opentelemetry.io/otel/semconv/v1.4.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/metric v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/sdk v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/env @@ -721,8 +736,8 @@ go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/trace v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop @@ -798,11 +813,11 @@ golang.org/x/net/websocket ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.8.0 +# golang.org/x/sync v0.9.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.26.0 +# golang.org/x/sys v0.27.0 ## explicit; go 1.18 golang.org/x/sys/cpu 
golang.org/x/sys/plan9 @@ -814,7 +829,7 @@ golang.org/x/sys/windows/svc/mgr # golang.org/x/term v0.25.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.19.0 +# golang.org/x/text v0.20.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/encoding @@ -845,7 +860,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.7.0 +# golang.org/x/time v0.8.0 ## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/tools v0.26.0 @@ -875,21 +890,18 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 -## explicit; go 1.19 -google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 +# google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.65.0 -## explicit; go 1.21 +# google.golang.org/grpc v1.68.0 +## explicit; go 1.22.7 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -897,6 +909,8 @@ google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst +google.golang.org/grpc/balancer/pickfirst/internal +google.golang.org/grpc/balancer/pickfirst/pickfirstleaf google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -907,7 +921,9 @@ google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog +google.golang.org/grpc/grpclog/internal google.golang.org/grpc/health google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal @@ -931,11 +947,13 @@ google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/stats google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/keepalive +google.golang.org/grpc/mem google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/resolver
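The stream_interfaces.go hunks earlier in this diff only add documentation to the generic stream interfaces; their method sets are unchanged, so generated code keeps compiling. For orientation, a hypothetical client-streaming handler written against `grpc.ClientStreamingServer[Req, Res]`, following the documented Recv/SendAndClose contract (the message types and Sum handler are invented for this sketch):

package sketch

import (
	"errors"
	"io"

	"google.golang.org/grpc"
)

// Invented message types standing in for generated protobuf structs.
type AddRequest struct{ N int64 }
type AddReply struct{ Total int64 }

// Sum drains the request stream until the client calls CloseAndRecv,
// which Recv reports as io.EOF, then answers exactly once with
// SendAndClose, as the new doc comments require.
func Sum(stream grpc.ClientStreamingServer[AddRequest, AddReply]) error {
	var total int64
	for {
		req, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return stream.SendAndClose(&AddReply{Total: total})
		}
		if err != nil {
			return err // stream unusable; the handler must return
		}
		total += req.N
	}
}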